/* Linux v3.15 */
   1#ifndef _LINUX_MM_H
   2#define _LINUX_MM_H
   3
   4#include <linux/errno.h>
   5
   6#ifdef __KERNEL__
   7
   8#include <linux/mmdebug.h>
   9#include <linux/gfp.h>
  10#include <linux/bug.h>
  11#include <linux/list.h>
  12#include <linux/mmzone.h>
  13#include <linux/rbtree.h>
  14#include <linux/atomic.h>
  15#include <linux/debug_locks.h>
  16#include <linux/mm_types.h>
  17#include <linux/range.h>
  18#include <linux/pfn.h>
  19#include <linux/bit_spinlock.h>
  20#include <linux/shrinker.h>
  21
  22struct mempolicy;
  23struct anon_vma;
  24struct anon_vma_chain;
  25struct file_ra_state;
  26struct user_struct;
  27struct writeback_control;
  28
  29#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
  30extern unsigned long max_mapnr;
  31
  32static inline void set_max_mapnr(unsigned long limit)
  33{
  34	max_mapnr = limit;
  35}
  36#else
  37static inline void set_max_mapnr(unsigned long limit) { }
  38#endif
  39
  40extern unsigned long totalram_pages;
  41extern void * high_memory;
  42extern int page_cluster;
  43
  44#ifdef CONFIG_SYSCTL
  45extern int sysctl_legacy_va_layout;
  46#else
  47#define sysctl_legacy_va_layout 0
  48#endif
  49
  50#include <asm/page.h>
  51#include <asm/pgtable.h>
  52#include <asm/processor.h>
  53
  54#ifndef __pa_symbol
  55#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
  56#endif
  57
  58extern unsigned long sysctl_user_reserve_kbytes;
  59extern unsigned long sysctl_admin_reserve_kbytes;
  60
  61extern int sysctl_overcommit_memory;
  62extern int sysctl_overcommit_ratio;
  63extern unsigned long sysctl_overcommit_kbytes;
  64
  65extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
  66				    size_t *, loff_t *);
  67extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  68				    size_t *, loff_t *);
  69
  70#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
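    /*
     * Illustrative sketch (editorial addition, not in the original header):
     * nth_page() is the safe way to step through a run of pages that might
     * cross a sparsemem section boundary, where plain "page + n" pointer
     * arithmetic on struct page is not guaranteed to work.
     *
     *	struct page *second = nth_page(first, 1);
     */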
  71
  72/* to align the pointer to the (next) page boundary */
  73#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
  74
  75/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
  76#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
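    /*
     * Illustrative sketch (editorial addition): typical use of the two
     * helpers above; "uaddr" and "nbytes" are hypothetical caller variables.
     *
     *	unsigned long len = PAGE_ALIGN(nbytes);		rounded up to a page multiple
     *	if (!PAGE_ALIGNED(uaddr))
     *		return -EINVAL;				misaligned address from caller
     */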
  77
  78/*
  79 * Linux kernel virtual memory manager primitives.
  80 * The idea being to have a "virtual" mm in the same way
  81 * we have a virtual fs - giving a cleaner interface to the
  82 * mm details, and allowing different kinds of memory mappings
  83 * (from shared memory to executable loading to arbitrary
  84 * mmap() functions).
  85 */
  86
  87extern struct kmem_cache *vm_area_cachep;
  88
  89#ifndef CONFIG_MMU
  90extern struct rb_root nommu_region_tree;
  91extern struct rw_semaphore nommu_region_sem;
  92
  93extern unsigned int kobjsize(const void *objp);
  94#endif
  95
  96/*
  97 * vm_flags in vm_area_struct, see mm_types.h.
  98 */
  99#define VM_NONE		0x00000000
 100
 101#define VM_READ		0x00000001	/* currently active flags */
 102#define VM_WRITE	0x00000002
 103#define VM_EXEC		0x00000004
 104#define VM_SHARED	0x00000008
 105
 106/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
 107#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
 108#define VM_MAYWRITE	0x00000020
 109#define VM_MAYEXEC	0x00000040
 110#define VM_MAYSHARE	0x00000080
 111
 112#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 113#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 114#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 115
 116#define VM_LOCKED	0x00002000
 117#define VM_IO           0x00004000	/* Memory mapped I/O or similar */
 118
 119					/* Used by sys_madvise() */
 120#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
 121#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
 122
 123#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
 124#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 125#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 126#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 127#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 128#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 129#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
 130#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
 131
 132#ifdef CONFIG_MEM_SOFT_DIRTY
 133# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
 134#else
 135# define VM_SOFTDIRTY	0
 136#endif
 137
 138#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 139#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 140#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
 141#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 142
 143#if defined(CONFIG_X86)
 144# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
 145#elif defined(CONFIG_PPC)
 146# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 147#elif defined(CONFIG_PARISC)
 148# define VM_GROWSUP	VM_ARCH_1
 149#elif defined(CONFIG_METAG)
 150# define VM_GROWSUP	VM_ARCH_1
 151#elif defined(CONFIG_IA64)
 152# define VM_GROWSUP	VM_ARCH_1
 153#elif !defined(CONFIG_MMU)
 154# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 155#endif
 156
 157#ifndef VM_GROWSUP
 158# define VM_GROWSUP	VM_NONE
 159#endif
 160
 161/* Bits set in the VMA until the stack is in its final location */
 162#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
 163
 164#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 165#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 166#endif
 167
 168#ifdef CONFIG_STACK_GROWSUP
 169#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 170#else
 171#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 172#endif
 173
 174/*
 175 * Special vmas that are non-mergeable, non-mlock()able.
 176 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 177 */
 178#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 179
 180/* This mask defines which mm->def_flags a process can inherit from its parent */
 181#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
 182
 183/*
 184 * mapping from the currently active vm_flags protection bits (the
 185 * low four bits) to a page protection mask.
 186 */
 187extern pgprot_t protection_map[16];
 188
 189#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 190#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 191#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
 192#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
 193#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
 194#define FAULT_FLAG_KILLABLE	0x20	/* The faulting task is in a SIGKILL-killable region */
 195#define FAULT_FLAG_TRIED	0x40	/* second try */
 196#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */
 197
 198/*
 199 * vm_fault is filled by the pagefault handler and passed to the vma's
 200 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 201 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 202 *
 203 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 204 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 205 */
 206struct vm_fault {
 207	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 208	pgoff_t pgoff;			/* Logical page offset based on vma */
 209	void __user *virtual_address;	/* Faulting virtual address */
 210
 211	struct page *page;		/* ->fault handlers should return a
 212					 * page here, unless VM_FAULT_NOPAGE
 213					 * is set (which is also implied by
 214					 * VM_FAULT_ERROR).
 215					 */
 216	/* for ->map_pages() only */
 217	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
 218					 * max_pgoff inclusive */
 219	pte_t *pte;			/* pte entry associated with ->pgoff */
 220};
 221
 222/*
 223 * These are the virtual MM functions - opening of an area, closing and
 224 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 225 * to the functions called when a no-page or a wp-page exception occurs. 
 226 */
 227struct vm_operations_struct {
 228	void (*open)(struct vm_area_struct * area);
 229	void (*close)(struct vm_area_struct * area);
 230	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 231	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 232
 233	/* notification that a previously read-only page is about to become
 234	 * writable; if an error is returned it will cause a SIGBUS */
 235	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
 236
 237	/* called by access_process_vm when get_user_pages() fails, typically
 238	 * for use by special VMAs that can switch between memory and hardware
 239	 */
 240	int (*access)(struct vm_area_struct *vma, unsigned long addr,
 241		      void *buf, int len, int write);
 242#ifdef CONFIG_NUMA
 243	/*
 244	 * set_policy() op must add a reference to any non-NULL @new mempolicy
 245	 * to hold the policy upon return.  Caller should pass NULL @new to
 246	 * remove a policy and fall back to surrounding context--i.e. do not
 247	 * install a MPOL_DEFAULT policy, nor the task or system default
 248	 * mempolicy.
 249	 */
 250	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 251
 252	/*
 253	 * get_policy() op must add reference [mpol_get()] to any policy at
 254	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
 255	 * in mm/mempolicy.c will do this automatically.
 256	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
 257	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
 258	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
 259	 * must return NULL--i.e., do not "fallback" to task or system default
 260	 * policy.
 261	 */
 262	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 263					unsigned long addr);
 264	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
 265		const nodemask_t *to, unsigned long flags);
 266#endif
 267	/* called by sys_remap_file_pages() to populate non-linear mapping */
 268	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
 269			   unsigned long size, pgoff_t pgoff);
 270};
 271
 272struct mmu_gather;
 273struct inode;
 274
 275#define page_private(page)		((page)->private)
 276#define set_page_private(page, v)	((page)->private = (v))
 277
 278/* It's valid only while the page is in the free path or on a free_list */
 279static inline void set_freepage_migratetype(struct page *page, int migratetype)
 280{
 281	page->index = migratetype;
 282}
 283
 284/* It's valid only while the page is in the free path or on a free_list */
 285static inline int get_freepage_migratetype(struct page *page)
 286{
 287	return page->index;
 288}
 289
 290/*
 291 * FIXME: take this include out, include page-flags.h in
 292 * files which need it (119 of them)
 293 */
 294#include <linux/page-flags.h>
 295#include <linux/huge_mm.h>
 296
 297/*
 298 * Methods to modify the page usage count.
 299 *
 300 * What counts for a page usage:
 301 * - cache mapping   (page->mapping)
 302 * - private data    (page->private)
 303 * - page mapped in a task's page tables, each mapping
 304 *   is counted separately
 305 *
 306 * Also, many kernel routines increase the page count before a critical
 307 * routine so they can be sure the page doesn't go away from under them.
 308 */
 309
 310/*
 311 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 312 */
 313static inline int put_page_testzero(struct page *page)
 314{
 315	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 316	return atomic_dec_and_test(&page->_count);
 317}
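    /*
     * Illustrative sketch (editorial addition): the usual pattern is to free
     * the page only when this caller dropped the final reference; the free
     * routine named here is hypothetical.
     *
     *	if (put_page_testzero(page))
     *		release_the_page(page);
     */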
 318
 319/*
 320 * Try to grab a ref unless the page has a refcount of zero, return false if
 321 * that is the case.
 322 * This can be called when MMU is off so it must not access
 323 * any of the virtual mappings.
 324 */
 325static inline int get_page_unless_zero(struct page *page)
 326{
 327	return atomic_inc_not_zero(&page->_count);
 328}
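    /*
     * Illustrative sketch (editorial addition): speculative lookups (e.g.
     * under RCU) take a reference only if the page still has users and back
     * off if it is already on its way to being freed.
     *
     *	if (!get_page_unless_zero(page))
     *		return NULL;
     *	... use the page ...
     *	put_page(page);
     */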
 329
 330/*
 331 * Try to drop a ref unless the page has a refcount of one, return false if
 332 * that is the case.
 333 * This is to make sure that the refcount won't become zero after this drop.
 334 * This can be called when MMU is off so it must not access
 335 * any of the virtual mappings.
 336 */
 337static inline int put_page_unless_one(struct page *page)
 338{
 339	return atomic_add_unless(&page->_count, -1, 1);
 340}
 341
 342extern int page_is_ram(unsigned long pfn);
 343
 344/* Support for virtually mapped pages */
 345struct page *vmalloc_to_page(const void *addr);
 346unsigned long vmalloc_to_pfn(const void *addr);
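    /*
     * Illustrative sketch (editorial addition): a driver doing I/O on a
     * vmalloc'ed buffer resolves each virtual page to its struct page;
     * "buf" is assumed to be a vmalloc area and "off" a page-aligned offset.
     *
     *	struct page *pg = vmalloc_to_page(buf + off);
     */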
 347
 348/*
 349 * Determine if an address is within the vmalloc range
 350 *
 351 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 352 * is no special casing required.
 353 */
 354static inline int is_vmalloc_addr(const void *x)
 355{
 356#ifdef CONFIG_MMU
 357	unsigned long addr = (unsigned long)x;
 358
 359	return addr >= VMALLOC_START && addr < VMALLOC_END;
 360#else
 361	return 0;
 362#endif
 363}
 364#ifdef CONFIG_MMU
 365extern int is_vmalloc_or_module_addr(const void *x);
 366#else
 367static inline int is_vmalloc_or_module_addr(const void *x)
 368{
 369	return 0;
 370}
 371#endif
 372
 373extern void kvfree(const void *addr);
 374
 375static inline void compound_lock(struct page *page)
 376{
 377#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 378	VM_BUG_ON_PAGE(PageSlab(page), page);
 379	bit_spin_lock(PG_compound_lock, &page->flags);
 380#endif
 381}
 382
 383static inline void compound_unlock(struct page *page)
 384{
 385#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 386	VM_BUG_ON_PAGE(PageSlab(page), page);
 387	bit_spin_unlock(PG_compound_lock, &page->flags);
 388#endif
 389}
 390
 391static inline unsigned long compound_lock_irqsave(struct page *page)
 392{
 393	unsigned long uninitialized_var(flags);
 394#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 395	local_irq_save(flags);
 396	compound_lock(page);
 397#endif
 398	return flags;
 399}
 400
 401static inline void compound_unlock_irqrestore(struct page *page,
 402					      unsigned long flags)
 403{
 404#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 405	compound_unlock(page);
 406	local_irq_restore(flags);
 407#endif
 408}
 409
 410static inline struct page *compound_head(struct page *page)
 411{
 412	if (unlikely(PageTail(page))) {
 413		struct page *head = page->first_page;
 414
 415		/*
 416		 * page->first_page may be a dangling pointer to an old
 417		 * compound page, so recheck that it is still a tail
 418		 * page before returning.
 419		 */
 420		smp_rmb();
 421		if (likely(PageTail(page)))
 422			return head;
 423	}
 424	return page;
 425}
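    /*
     * Illustrative sketch (editorial addition): callers that may be handed a
     * tail page normalise it to the head before looking at per-compound
     * state such as the order.
     *
     *	struct page *head = compound_head(page);
     *	int order = compound_order(head);	0 unless head is a compound page
     */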
 426
 427/*
 428 * The atomic page->_mapcount, starts from -1: so that transitions
 429 * both from it and to it can be tracked, using atomic_inc_and_test
 430 * and atomic_add_negative(-1).
 431 */
 432static inline void page_mapcount_reset(struct page *page)
 433{
 434	atomic_set(&(page)->_mapcount, -1);
 435}
 436
 437static inline int page_mapcount(struct page *page)
 438{
 439	return atomic_read(&(page)->_mapcount) + 1;
 440}
 441
 442static inline int page_count(struct page *page)
 443{
 444	return atomic_read(&compound_head(page)->_count);
 445}
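    /*
     * Illustrative sketch (editorial addition): page_mapcount() counts user
     * page table mappings only, while page_count() counts all references, so
     * a mapped pagecache page normally has page_count() > page_mapcount()
     * because of the pagecache's own reference.
     *
     *	if (page_count(page) > page_mapcount(page))
     *		...				some non-mapping reference exists
     */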
 446
 447#ifdef CONFIG_HUGETLB_PAGE
 448extern int PageHeadHuge(struct page *page_head);
 449#else /* CONFIG_HUGETLB_PAGE */
 450static inline int PageHeadHuge(struct page *page_head)
 451{
 452	return 0;
 453}
 454#endif /* CONFIG_HUGETLB_PAGE */
 455
 456static inline bool __compound_tail_refcounted(struct page *page)
 457{
 458	return !PageSlab(page) && !PageHeadHuge(page);
 459}
 460
 461/*
 462 * This takes a head page as parameter and tells if the
 463 * tail page reference counting can be skipped.
 464 *
 465 * For this to be safe, PageSlab and PageHeadHuge must remain true on
 466 * any given page where they return true here, until all tail pins
 467 * have been released.
 468 */
 469static inline bool compound_tail_refcounted(struct page *page)
 470{
 471	VM_BUG_ON_PAGE(!PageHead(page), page);
 472	return __compound_tail_refcounted(page);
 473}
 474
 475static inline void get_huge_page_tail(struct page *page)
 476{
 477	/*
 478	 * __split_huge_page_refcount() cannot run from under us.
 479	 */
 480	VM_BUG_ON_PAGE(!PageTail(page), page);
 481	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 482	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 483	if (compound_tail_refcounted(page->first_page))
 484		atomic_inc(&page->_mapcount);
 485}
 486
 487extern bool __get_page_tail(struct page *page);
 488
 489static inline void get_page(struct page *page)
 490{
 491	if (unlikely(PageTail(page)))
 492		if (likely(__get_page_tail(page)))
 493			return;
 494	/*
 495	 * Getting a normal page or the head of a compound page
 496	 * requires to already have an elevated page->_count.
 497	 */
 498	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 499	atomic_inc(&page->_count);
 500}
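    /*
     * Illustrative sketch (editorial addition): every successful get_page()
     * must eventually be balanced by a put_page() once the caller is done
     * with the page.
     *
     *	get_page(page);
     *	... use the page, possibly after dropping the lock it was found under ...
     *	put_page(page);
     */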
 501
 502static inline struct page *virt_to_head_page(const void *x)
 503{
 504	struct page *page = virt_to_page(x);
 505	return compound_head(page);
 506}
 507
 508/*
 509 * Setup the page count before being freed into the page allocator for
 510 * the first time (boot or memory hotplug)
 511 */
 512static inline void init_page_count(struct page *page)
 513{
 514	atomic_set(&page->_count, 1);
 515}
 516
 517/*
 518 * PageBuddy() indicates that the page is free and in the buddy system
 519 * (see mm/page_alloc.c).
 520 *
 521 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 522 * -2 so that an underflow of the page_mapcount() won't be mistaken
 523 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 524 * efficiently by most CPU architectures.
 525 */
 526#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
 527
 528static inline int PageBuddy(struct page *page)
 529{
 530	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
 531}
 532
 533static inline void __SetPageBuddy(struct page *page)
 534{
 535	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 536	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 537}
 538
 539static inline void __ClearPageBuddy(struct page *page)
 540{
 541	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 542	atomic_set(&page->_mapcount, -1);
 543}
 544
 545void put_page(struct page *page);
 546void put_pages_list(struct list_head *pages);
 547
 548void split_page(struct page *page, unsigned int order);
 549int split_free_page(struct page *page);
 550
 551/*
 552 * Compound pages have a destructor function.  Provide a
 553 * prototype for that function and accessor functions.
 554 * These are _only_ valid on the head of a PG_compound page.
 555 */
 556typedef void compound_page_dtor(struct page *);
 557
 558static inline void set_compound_page_dtor(struct page *page,
 559						compound_page_dtor *dtor)
 560{
 561	page[1].lru.next = (void *)dtor;
 562}
 563
 564static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 565{
 566	return (compound_page_dtor *)page[1].lru.next;
 567}
 568
 569static inline int compound_order(struct page *page)
 570{
 571	if (!PageHead(page))
 572		return 0;
 573	return (unsigned long)page[1].lru.prev;
 574}
 575
 576static inline void set_compound_order(struct page *page, unsigned long order)
 577{
 578	page[1].lru.prev = (void *)order;
 579}
 580
 581#ifdef CONFIG_MMU
 582/*
 583 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 584 * servicing faults for write access.  In the normal case, we always want
 585 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 586 * that do not have writing enabled, when used by access_process_vm.
 587 */
 588static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 589{
 590	if (likely(vma->vm_flags & VM_WRITE))
 591		pte = pte_mkwrite(pte);
 592	return pte;
 593}
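    /*
     * Illustrative sketch (editorial addition): a fault handler building a
     * pte for a write fault typically does something like the following,
     * where "write" stands for the FAULT_FLAG_WRITE predicate.
     *
     *	entry = mk_pte(page, vma->vm_page_prot);
     *	if (write)
     *		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
     */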
 594
 595void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 596		struct page *page, pte_t *pte, bool write, bool anon);
 597#endif
 598
 599/*
 600 * Multiple processes may "see" the same page. E.g. for untouched
 601 * mappings of /dev/null, all processes see the same page full of
 602 * zeroes, and text pages of executables and shared libraries have
 603 * only one copy in memory, at most, normally.
 604 *
 605 * For the non-reserved pages, page_count(page) denotes a reference count.
 606 *   page_count() == 0 means the page is free. page->lru is then used for
 607 *   freelist management in the buddy allocator.
 608 *   page_count() > 0  means the page has been allocated.
 609 *
 610 * Pages are allocated by the slab allocator in order to provide memory
 611 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 612 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 613 * unless a particular usage is carefully commented. (the responsibility of
 614 * freeing the kmalloc memory is the caller's, of course).
 615 *
 616 * A page may be used by anyone else who does a __get_free_page().
 617 * In this case, page_count still tracks the references, and should only
 618 * be used through the normal accessor functions. The top bits of page->flags
 619 * and page->virtual store page management information, but all other fields
 620 * are unused and could be used privately, carefully. The management of this
 621 * page is the responsibility of the one who allocated it, and those who have
 622 * subsequently been given references to it.
 623 *
 624 * The other pages (we may call them "pagecache pages") are completely
 625 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 626 * The following discussion applies only to them.
 627 *
 628 * A pagecache page contains an opaque `private' member, which belongs to the
 629 * page's address_space. Usually, this is the address of a circular list of
 630 * the page's disk buffers. PG_private must be set to tell the VM to call
 631 * into the filesystem to release these pages.
 632 *
 633 * A page may belong to an inode's memory mapping. In this case, page->mapping
 634 * is the pointer to the inode, and page->index is the file offset of the page,
 635 * in units of PAGE_CACHE_SIZE.
 636 *
 637 * If pagecache pages are not associated with an inode, they are said to be
 638 * anonymous pages. These may become associated with the swapcache, and in that
 639 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 640 *
 641 * In either case (swapcache or inode backed), the pagecache itself holds one
 642 * reference to the page. Setting PG_private should also increment the
 643 * refcount. Each user mapping also has a reference to the page.
 644 *
 645 * The pagecache pages are stored in a per-mapping radix tree, which is
 646 * rooted at mapping->page_tree, and indexed by offset.
 647 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 648 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 649 *
 650 * All pagecache pages may be subject to I/O:
 651 * - inode pages may need to be read from disk,
 652 * - inode pages which have been modified and are MAP_SHARED may need
 653 *   to be written back to the inode on disk,
 654 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 655 *   modified may need to be swapped out to swap space and (later) to be read
 656 *   back into memory.
 657 */
 658
 659/*
 660 * The zone field is never updated after free_area_init_core()
 661 * sets it, so none of the operations on it need to be atomic.
 662 */
 663
 664/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
 665#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 666#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 667#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
 668#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
 669
 670/*
 671 * Define the bit shifts to access each section.  For non-existent
 672 * sections we define the shift as 0; that plus a 0 mask ensures
 673 * the compiler will optimise away references to them.
 674 */
 675#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 676#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 677#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 678#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
 679
 680/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 681#ifdef NODE_NOT_IN_PAGE_FLAGS
 682#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
 683#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
 684						SECTIONS_PGOFF : ZONES_PGOFF)
 685#else
 686#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
 687#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
 688						NODES_PGOFF : ZONES_PGOFF)
 689#endif
 690
 691#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 692
 693#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 694#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 695#endif
 696
 697#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 698#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 699#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 700#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
 701#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 702
 703static inline enum zone_type page_zonenum(const struct page *page)
 704{
 705	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 706}
 707
 708#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 709#define SECTION_IN_PAGE_FLAGS
 710#endif
 711
 712/*
 713 * The identification function is mainly used by the buddy allocator for
 714 * determining if two pages could be buddies. We are not really identifying
 715 * the zone since we could be using the section number id if we do not have
 716 * node id available in page flags.
 717 * We only guarantee that it will return the same value for two combinable
 718 * pages in a zone.
 719 */
 720static inline int page_zone_id(struct page *page)
 721{
 722	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 723}
 724
 725static inline int zone_to_nid(struct zone *zone)
 726{
 727#ifdef CONFIG_NUMA
 728	return zone->node;
 729#else
 730	return 0;
 731#endif
 732}
 733
 734#ifdef NODE_NOT_IN_PAGE_FLAGS
 735extern int page_to_nid(const struct page *page);
 736#else
 737static inline int page_to_nid(const struct page *page)
 738{
 739	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 740}
 741#endif
 742
 743#ifdef CONFIG_NUMA_BALANCING
 744static inline int cpu_pid_to_cpupid(int cpu, int pid)
 745{
 746	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
 747}
 748
 749static inline int cpupid_to_pid(int cpupid)
 750{
 751	return cpupid & LAST__PID_MASK;
 752}
 753
 754static inline int cpupid_to_cpu(int cpupid)
 755{
 756	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 757}
 758
 759static inline int cpupid_to_nid(int cpupid)
 760{
 761	return cpu_to_node(cpupid_to_cpu(cpupid));
 762}
 763
 764static inline bool cpupid_pid_unset(int cpupid)
 765{
 766	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
 767}
 768
 769static inline bool cpupid_cpu_unset(int cpupid)
 770{
 771	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
 772}
 773
 774static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 775{
 776	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
 777}
 778
 779#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
 780#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 781static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 782{
 783	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 784}
 785
 786static inline int page_cpupid_last(struct page *page)
 787{
 788	return page->_last_cpupid;
 789}
 790static inline void page_cpupid_reset_last(struct page *page)
 791{
 792	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
 793}
 794#else
 795static inline int page_cpupid_last(struct page *page)
 796{
 797	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 798}
 799
 800extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 801
 802static inline void page_cpupid_reset_last(struct page *page)
 803{
 804	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
 805
 806	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
 807	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 808}
 809#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 810#else /* !CONFIG_NUMA_BALANCING */
 811static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 812{
 813	return page_to_nid(page); /* XXX */
 814}
 815
 816static inline int page_cpupid_last(struct page *page)
 817{
 818	return page_to_nid(page); /* XXX */
 819}
 820
 821static inline int cpupid_to_nid(int cpupid)
 822{
 823	return -1;
 824}
 825
 826static inline int cpupid_to_pid(int cpupid)
 827{
 828	return -1;
 829}
 830
 831static inline int cpupid_to_cpu(int cpupid)
 832{
 833	return -1;
 834}
 835
 836static inline int cpu_pid_to_cpupid(int nid, int pid)
 837{
 838	return -1;
 839}
 840
 841static inline bool cpupid_pid_unset(int cpupid)
 842{
 843	return 1;
 844}
 845
 846static inline void page_cpupid_reset_last(struct page *page)
 847{
 848}
 849
 850static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 851{
 852	return false;
 853}
 854#endif /* CONFIG_NUMA_BALANCING */
 855
 856static inline struct zone *page_zone(const struct page *page)
 857{
 858	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 859}
 860
 861#ifdef SECTION_IN_PAGE_FLAGS
 862static inline void set_page_section(struct page *page, unsigned long section)
 863{
 864	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
 865	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 866}
 867
 868static inline unsigned long page_to_section(const struct page *page)
 869{
 870	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 871}
 872#endif
 873
 874static inline void set_page_zone(struct page *page, enum zone_type zone)
 875{
 876	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 877	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 878}
 879
 880static inline void set_page_node(struct page *page, unsigned long node)
 881{
 882	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
 883	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 884}
 885
 886static inline void set_page_links(struct page *page, enum zone_type zone,
 887	unsigned long node, unsigned long pfn)
 888{
 889	set_page_zone(page, zone);
 890	set_page_node(page, node);
 891#ifdef SECTION_IN_PAGE_FLAGS
 892	set_page_section(page, pfn_to_section_nr(pfn));
 893#endif
 894}
 895
 896/*
 897 * Some inline functions in vmstat.h depend on page_zone()
 898 */
 899#include <linux/vmstat.h>
 900
 901static __always_inline void *lowmem_page_address(const struct page *page)
 902{
 903	return __va(PFN_PHYS(page_to_pfn(page)));
 904}
 905
 906#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
 907#define HASHED_PAGE_VIRTUAL
 908#endif
 909
 910#if defined(WANT_PAGE_VIRTUAL)
 911static inline void *page_address(const struct page *page)
 912{
 913	return page->virtual;
 914}
 915static inline void set_page_address(struct page *page, void *address)
 916{
 917	page->virtual = address;
 918}
 919#define page_address_init()  do { } while(0)
 920#endif
 921
 922#if defined(HASHED_PAGE_VIRTUAL)
 923void *page_address(const struct page *page);
 924void set_page_address(struct page *page, void *virtual);
 925void page_address_init(void);
 926#endif
 927
 928#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
 929#define page_address(page) lowmem_page_address(page)
 930#define set_page_address(page, address)  do { } while(0)
 931#define page_address_init()  do { } while(0)
 932#endif
 933
 934/*
 935 * On an anonymous page mapped into a user virtual memory area,
 936 * page->mapping points to its anon_vma, not to a struct address_space;
 937 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 938 *
 939 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 940 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 941 * and then page->mapping points, not to an anon_vma, but to a private
 942 * structure which KSM associates with that merged page.  See ksm.h.
 943 *
 944 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 945 *
 946 * Please note that, confusingly, "page_mapping" refers to the inode
 947 * address_space which maps the page from disk; whereas "page_mapped"
 948 * refers to user virtual address space into which the page is mapped.
 949 */
 950#define PAGE_MAPPING_ANON	1
 951#define PAGE_MAPPING_KSM	2
 952#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 953
 954extern struct address_space *page_mapping(struct page *page);
 955
 956/* Neutral page->mapping pointer to address_space or anon_vma or other */
 957static inline void *page_rmapping(struct page *page)
 958{
 959	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 960}
 961
 962extern struct address_space *__page_file_mapping(struct page *);
 963
 964static inline
 965struct address_space *page_file_mapping(struct page *page)
 966{
 967	if (unlikely(PageSwapCache(page)))
 968		return __page_file_mapping(page);
 969
 970	return page->mapping;
 971}
 972
 973static inline int PageAnon(struct page *page)
 974{
 975	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
 976}
 977
 978/*
 979 * Return the pagecache index of the passed page.  Regular pagecache pages
 980 * use ->index whereas swapcache pages use ->private
 981 */
 982static inline pgoff_t page_index(struct page *page)
 983{
 984	if (unlikely(PageSwapCache(page)))
 985		return page_private(page);
 986	return page->index;
 987}
 988
 989extern pgoff_t __page_file_index(struct page *page);
 990
 991/*
 992 * Return the file index of the page. Regular pagecache pages use ->index
 993 * whereas swapcache pages use swp_offset(->private)
 994 */
 995static inline pgoff_t page_file_index(struct page *page)
 996{
 997	if (unlikely(PageSwapCache(page)))
 998		return __page_file_index(page);
 999
1000	return page->index;
1001}
1002
1003/*
1004 * Return true if this page is mapped into pagetables.
1005 */
1006static inline int page_mapped(struct page *page)
1007{
1008	return atomic_read(&(page)->_mapcount) >= 0;
1009}
1010
1011/*
1012 * Different kinds of faults, as returned by handle_mm_fault().
1013 * Used to decide whether a process gets delivered SIGBUS or
1014 * just gets major/minor fault counters bumped up.
1015 */
1016
1017#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
1018
1019#define VM_FAULT_OOM	0x0001
1020#define VM_FAULT_SIGBUS	0x0002
1021#define VM_FAULT_MAJOR	0x0004
1022#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
1023#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
1024#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
1025
1026#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
1027#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
1028#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
1029#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
1030
1031#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1032
1033#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
1034			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
1035
1036/* Encode hstate index for a hwpoisoned large page */
1037#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1038#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
1039
1040/*
1041 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1042 */
1043extern void pagefault_out_of_memory(void);
1044
1045#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
1046
1047/*
1048 * Flags passed to show_mem() and show_free_areas() to suppress output in
1049 * various contexts.
1050 */
1051#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
1052
1053extern void show_free_areas(unsigned int flags);
1054extern bool skip_free_areas_node(unsigned int flags, int nid);
1055
1056int shmem_zero_setup(struct vm_area_struct *);
1057#ifdef CONFIG_SHMEM
1058bool shmem_mapping(struct address_space *mapping);
1059#else
1060static inline bool shmem_mapping(struct address_space *mapping)
1061{
1062	return false;
1063}
1064#endif
1065
1066extern int can_do_mlock(void);
1067extern int user_shm_lock(size_t, struct user_struct *);
1068extern void user_shm_unlock(size_t, struct user_struct *);
1069
1070/*
1071 * Parameter block passed down to zap_pte_range in exceptional cases.
1072 */
1073struct zap_details {
1074	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
1075	struct address_space *check_mapping;	/* Check page->mapping if set */
1076	pgoff_t	first_index;			/* Lowest page->index to unmap */
1077	pgoff_t last_index;			/* Highest page->index to unmap */
1078};
1079
1080struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1081		pte_t pte);
1082
1083int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1084		unsigned long size);
1085void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1086		unsigned long size, struct zap_details *);
1087void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1088		unsigned long start, unsigned long end);
1089
1090/**
1091 * mm_walk - callbacks for walk_page_range
1092 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
1093 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1094 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1095 *	       this handler is required to be able to handle
1096 *	       pmd_trans_huge() pmds.  They may simply choose to
1097 *	       split_huge_page() instead of handling it explicitly.
1098 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1099 * @pte_hole: if set, called for each hole at all levels
1100 * @hugetlb_entry: if set, called for each hugetlb entry
1101 *		   *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
1102 * 			      is used.
1103 *
1104 * (see walk_page_range for more details)
1105 */
1106struct mm_walk {
1107	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
1108			 unsigned long next, struct mm_walk *walk);
1109	int (*pud_entry)(pud_t *pud, unsigned long addr,
1110	                 unsigned long next, struct mm_walk *walk);
1111	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1112			 unsigned long next, struct mm_walk *walk);
1113	int (*pte_entry)(pte_t *pte, unsigned long addr,
1114			 unsigned long next, struct mm_walk *walk);
1115	int (*pte_hole)(unsigned long addr, unsigned long next,
1116			struct mm_walk *walk);
1117	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1118			     unsigned long addr, unsigned long next,
1119			     struct mm_walk *walk);
1120	struct mm_struct *mm;
1121	void *private;
1122};
1123
1124int walk_page_range(unsigned long addr, unsigned long end,
1125		struct mm_walk *walk);
1126void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1127		unsigned long end, unsigned long floor, unsigned long ceiling);
1128int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1129			struct vm_area_struct *vma);
1130void unmap_mapping_range(struct address_space *mapping,
1131		loff_t const holebegin, loff_t const holelen, int even_cows);
1132int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1133	unsigned long *pfn);
1134int follow_phys(struct vm_area_struct *vma, unsigned long address,
1135		unsigned int flags, unsigned long *prot, resource_size_t *phys);
1136int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1137			void *buf, int len, int write);
1138
1139static inline void unmap_shared_mapping_range(struct address_space *mapping,
1140		loff_t const holebegin, loff_t const holelen)
1141{
1142	unmap_mapping_range(mapping, holebegin, holelen, 0);
1143}
1144
1145extern void truncate_pagecache(struct inode *inode, loff_t new);
1146extern void truncate_setsize(struct inode *inode, loff_t newsize);
1147void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1148int truncate_inode_page(struct address_space *mapping, struct page *page);
1149int generic_error_remove_page(struct address_space *mapping, struct page *page);
1150int invalidate_inode_page(struct page *page);
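    /*
     * Illustrative sketch (editorial addition): a filesystem shrinking an
     * inode to "newsize" usually lets truncate_setsize() update i_size and
     * then unmap and drop the pagecache beyond the new end of file.
     *
     *	truncate_setsize(inode, newsize);
     */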
1151
1152#ifdef CONFIG_MMU
1153extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1154			unsigned long address, unsigned int flags);
1155extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1156			    unsigned long address, unsigned int fault_flags);
1157#else
1158static inline int handle_mm_fault(struct mm_struct *mm,
1159			struct vm_area_struct *vma, unsigned long address,
1160			unsigned int flags)
1161{
1162	/* should never happen if there's no MMU */
1163	BUG();
1164	return VM_FAULT_SIGBUS;
1165}
1166static inline int fixup_user_fault(struct task_struct *tsk,
1167		struct mm_struct *mm, unsigned long address,
1168		unsigned int fault_flags)
1169{
1170	/* should never happen if there's no MMU */
1171	BUG();
1172	return -EFAULT;
1173}
1174#endif
1175
1176extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1177extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1178		void *buf, int len, int write);
1179
1180long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1181		      unsigned long start, unsigned long nr_pages,
1182		      unsigned int foll_flags, struct page **pages,
1183		      struct vm_area_struct **vmas, int *nonblocking);
1184long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1185		    unsigned long start, unsigned long nr_pages,
1186		    int write, int force, struct page **pages,
1187		    struct vm_area_struct **vmas);
1188int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1189			struct page **pages);
1190struct kvec;
1191int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1192			struct page **pages);
1193int get_kernel_page(unsigned long start, int write, struct page **pages);
1194struct page *get_dump_page(unsigned long addr);
1195
1196extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1197extern void do_invalidatepage(struct page *page, unsigned int offset,
1198			      unsigned int length);
1199
1200int __set_page_dirty_nobuffers(struct page *page);
1201int __set_page_dirty_no_writeback(struct page *page);
1202int redirty_page_for_writepage(struct writeback_control *wbc,
1203				struct page *page);
1204void account_page_dirtied(struct page *page, struct address_space *mapping);
1205void account_page_writeback(struct page *page);
1206int set_page_dirty(struct page *page);
1207int set_page_dirty_lock(struct page *page);
1208int clear_page_dirty_for_io(struct page *page);
1209int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1210
1211/* Is the vma a continuation of the stack vma above it? */
1212static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1213{
1214	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1215}
1216
1217static inline int stack_guard_page_start(struct vm_area_struct *vma,
1218					     unsigned long addr)
1219{
1220	return (vma->vm_flags & VM_GROWSDOWN) &&
1221		(vma->vm_start == addr) &&
1222		!vma_growsdown(vma->vm_prev, addr);
1223}
1224
1225/* Is the vma a continuation of the stack vma below it? */
1226static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1227{
1228	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1229}
1230
1231static inline int stack_guard_page_end(struct vm_area_struct *vma,
1232					   unsigned long addr)
1233{
1234	return (vma->vm_flags & VM_GROWSUP) &&
1235		(vma->vm_end == addr) &&
1236		!vma_growsup(vma->vm_next, addr);
1237}
1238
1239extern pid_t
1240vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
1241
1242extern unsigned long move_page_tables(struct vm_area_struct *vma,
1243		unsigned long old_addr, struct vm_area_struct *new_vma,
1244		unsigned long new_addr, unsigned long len,
1245		bool need_rmap_locks);
1246extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1247			      unsigned long end, pgprot_t newprot,
1248			      int dirty_accountable, int prot_numa);
1249extern int mprotect_fixup(struct vm_area_struct *vma,
1250			  struct vm_area_struct **pprev, unsigned long start,
1251			  unsigned long end, unsigned long newflags);
1252
1253/*
1254 * Doesn't attempt to fault pages in and may return fewer pages than requested.
1255 */
1256int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1257			  struct page **pages);
1258/*
1259 * per-process(per-mm_struct) statistics.
1260 */
1261static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1262{
1263	long val = atomic_long_read(&mm->rss_stat.count[member]);
1264
1265#ifdef SPLIT_RSS_COUNTING
1266	/*
1267	 * The counter is updated asynchronously and may temporarily go negative.
1268	 * But a negative value is never what callers expect to see.
1269	 */
1270	if (val < 0)
1271		val = 0;
1272#endif
1273	return (unsigned long)val;
1274}
1275
1276static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1277{
1278	atomic_long_add(value, &mm->rss_stat.count[member]);
1279}
1280
1281static inline void inc_mm_counter(struct mm_struct *mm, int member)
1282{
1283	atomic_long_inc(&mm->rss_stat.count[member]);
1284}
1285
1286static inline void dec_mm_counter(struct mm_struct *mm, int member)
1287{
1288	atomic_long_dec(&mm->rss_stat.count[member]);
1289}
1290
1291static inline unsigned long get_mm_rss(struct mm_struct *mm)
1292{
1293	return get_mm_counter(mm, MM_FILEPAGES) +
1294		get_mm_counter(mm, MM_ANONPAGES);
1295}
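    /*
     * Illustrative sketch (editorial addition): reporting a task's resident
     * set size in kilobytes from these counters, much as the /proc code does.
     *
     *	unsigned long rss_kb = get_mm_rss(mm) << (PAGE_SHIFT - 10);
     */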
1296
1297static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1298{
1299	return max(mm->hiwater_rss, get_mm_rss(mm));
1300}
1301
1302static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1303{
1304	return max(mm->hiwater_vm, mm->total_vm);
1305}
1306
1307static inline void update_hiwater_rss(struct mm_struct *mm)
1308{
1309	unsigned long _rss = get_mm_rss(mm);
1310
1311	if ((mm)->hiwater_rss < _rss)
1312		(mm)->hiwater_rss = _rss;
1313}
1314
1315static inline void update_hiwater_vm(struct mm_struct *mm)
1316{
1317	if (mm->hiwater_vm < mm->total_vm)
1318		mm->hiwater_vm = mm->total_vm;
1319}
1320
1321static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1322					 struct mm_struct *mm)
1323{
1324	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1325
1326	if (*maxrss < hiwater_rss)
1327		*maxrss = hiwater_rss;
1328}
1329
1330#if defined(SPLIT_RSS_COUNTING)
1331void sync_mm_rss(struct mm_struct *mm);
1332#else
1333static inline void sync_mm_rss(struct mm_struct *mm)
1334{
1335}
1336#endif
1337
1338int vma_wants_writenotify(struct vm_area_struct *vma);
1339
1340extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1341			       spinlock_t **ptl);
1342static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1343				    spinlock_t **ptl)
1344{
1345	pte_t *ptep;
1346	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1347	return ptep;
1348}
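    /*
     * Illustrative sketch (editorial addition): get_locked_pte() pairs with
     * pte_unmap_unlock() (defined further below); on success the pte is
     * mapped and its page table lock is held.
     *
     *	pte = get_locked_pte(mm, addr, &ptl);
     *	if (!pte)
     *		return -ENOMEM;
     *	... examine or modify *pte ...
     *	pte_unmap_unlock(pte, ptl);
     */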
1349
1350#ifdef __PAGETABLE_PUD_FOLDED
1351static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1352						unsigned long address)
1353{
1354	return 0;
1355}
1356#else
1357int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1358#endif
1359
1360#ifdef __PAGETABLE_PMD_FOLDED
1361static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1362						unsigned long address)
1363{
1364	return 0;
1365}
1366#else
1367int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1368#endif
1369
1370int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1371		pmd_t *pmd, unsigned long address);
1372int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1373
1374/*
1375 * The following ifdef is needed to get the 4level-fixup.h header to work.
1376 * Remove it when 4level-fixup.h has been removed.
1377 */
1378#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1379static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1380{
1381	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1382		NULL: pud_offset(pgd, address);
1383}
1384
1385static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1386{
1387	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1388		NULL: pmd_offset(pud, address);
1389}
1390#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1391
1392#if USE_SPLIT_PTE_PTLOCKS
1393#if ALLOC_SPLIT_PTLOCKS
1394void __init ptlock_cache_init(void);
1395extern bool ptlock_alloc(struct page *page);
1396extern void ptlock_free(struct page *page);
1397
1398static inline spinlock_t *ptlock_ptr(struct page *page)
1399{
1400	return page->ptl;
1401}
1402#else /* ALLOC_SPLIT_PTLOCKS */
1403static inline void ptlock_cache_init(void)
1404{
1405}
1406
1407static inline bool ptlock_alloc(struct page *page)
1408{
1409	return true;
1410}
1411
1412static inline void ptlock_free(struct page *page)
1413{
1414}
1415
1416static inline spinlock_t *ptlock_ptr(struct page *page)
1417{
1418	return &page->ptl;
1419}
1420#endif /* ALLOC_SPLIT_PTLOCKS */
1421
1422static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1423{
1424	return ptlock_ptr(pmd_page(*pmd));
1425}
1426
1427static inline bool ptlock_init(struct page *page)
1428{
1429	/*
1430	 * prep_new_page() initializes page->private (and therefore page->ptl)
1431	 * with 0. Make sure nobody has taken it into use in between.
1432	 *
1433	 * That can happen if an arch tries to use slab for page table allocation:
1434	 * slab code uses page->slab_cache and page->first_page (for tail
1435	 * pages), which share storage with page->ptl.
1436	 */
1437	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1438	if (!ptlock_alloc(page))
1439		return false;
1440	spin_lock_init(ptlock_ptr(page));
1441	return true;
1442}
1443
1444/* Reset page->mapping so free_pages_check won't complain. */
1445static inline void pte_lock_deinit(struct page *page)
1446{
1447	page->mapping = NULL;
1448	ptlock_free(page);
1449}
1450
1451#else	/* !USE_SPLIT_PTE_PTLOCKS */
1452/*
1453 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1454 */
1455static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1456{
1457	return &mm->page_table_lock;
1458}
1459static inline void ptlock_cache_init(void) {}
1460static inline bool ptlock_init(struct page *page) { return true; }
1461static inline void pte_lock_deinit(struct page *page) {}
1462#endif /* USE_SPLIT_PTE_PTLOCKS */
1463
1464static inline void pgtable_init(void)
1465{
1466	ptlock_cache_init();
1467	pgtable_cache_init();
1468}
1469
1470static inline bool pgtable_page_ctor(struct page *page)
1471{
1472	inc_zone_page_state(page, NR_PAGETABLE);
1473	return ptlock_init(page);
1474}
1475
1476static inline void pgtable_page_dtor(struct page *page)
1477{
1478	pte_lock_deinit(page);
1479	dec_zone_page_state(page, NR_PAGETABLE);
1480}
1481
1482#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
1483({							\
1484	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
1485	pte_t *__pte = pte_offset_map(pmd, address);	\
1486	*(ptlp) = __ptl;				\
1487	spin_lock(__ptl);				\
1488	__pte;						\
1489})
1490
1491#define pte_unmap_unlock(pte, ptl)	do {		\
1492	spin_unlock(ptl);				\
1493	pte_unmap(pte);					\
1494} while (0)
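    /*
     * Illustrative sketch (editorial addition): the common pattern for
     * walking the user ptes covered by one pmd under the (possibly split)
     * page table lock.
     *
     *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
     *	do {
     *		... look at *pte ...
     *	} while (pte++, addr += PAGE_SIZE, addr != end);
     *	pte_unmap_unlock(pte - 1, ptl);
     */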
1495
1496#define pte_alloc_map(mm, vma, pmd, address)				\
1497	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
1498							pmd, address))?	\
1499	 NULL: pte_offset_map(pmd, address))
1500
1501#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
1502	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
1503							pmd, address))?	\
1504		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1505
1506#define pte_alloc_kernel(pmd, address)			\
1507	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1508		NULL: pte_offset_kernel(pmd, address))
1509
1510#if USE_SPLIT_PMD_PTLOCKS
1511
1512static struct page *pmd_to_page(pmd_t *pmd)
1513{
1514	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1515	return virt_to_page((void *)((unsigned long) pmd & mask));
1516}
1517
1518static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1519{
1520	return ptlock_ptr(pmd_to_page(pmd));
1521}
1522
1523static inline bool pgtable_pmd_page_ctor(struct page *page)
1524{
1525#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1526	page->pmd_huge_pte = NULL;
1527#endif
1528	return ptlock_init(page);
1529}
1530
1531static inline void pgtable_pmd_page_dtor(struct page *page)
1532{
1533#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1534	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1535#endif
1536	ptlock_free(page);
1537}
1538
1539#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1540
1541#else
1542
1543static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1544{
1545	return &mm->page_table_lock;
1546}
1547
1548static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1549static inline void pgtable_pmd_page_dtor(struct page *page) {}
1550
1551#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1552
1553#endif
1554
1555static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1556{
1557	spinlock_t *ptl = pmd_lockptr(mm, pmd);
1558	spin_lock(ptl);
1559	return ptl;
1560}
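/*
 * Illustrative sketch (not part of the original header): reading a pmd entry
 * under its lock with pmd_lock()/spin_unlock().  The helper name is made up.
 */
static inline int example_pmd_present_locked(struct mm_struct *mm, pmd_t *pmd)
{
	spinlock_t *ptl = pmd_lock(mm, pmd);
	int ret = pmd_present(*pmd);

	spin_unlock(ptl);
	return ret;
}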
1561
1562extern void free_area_init(unsigned long * zones_size);
1563extern void free_area_init_node(int nid, unsigned long * zones_size,
1564		unsigned long zone_start_pfn, unsigned long *zholes_size);
1565extern void free_initmem(void);
1566
1567/*
1568 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1569 * into the buddy system. The freed pages will be poisoned with the pattern
1570 * "poison" if that value is within the range [0, UCHAR_MAX].
1571 * Returns the number of pages freed into the buddy system.
1572 */
1573extern unsigned long free_reserved_area(void *start, void *end,
1574					int poison, char *s);
1575
1576#ifdef	CONFIG_HIGHMEM
1577/*
1578 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1579 * and totalram_pages.
1580 */
1581extern void free_highmem_page(struct page *page);
1582#endif
1583
1584extern void adjust_managed_page_count(struct page *page, long count);
1585extern void mem_init_print_info(const char *str);
1586
1587/* Free the reserved page into the buddy system, so it gets managed. */
1588static inline void __free_reserved_page(struct page *page)
1589{
1590	ClearPageReserved(page);
1591	init_page_count(page);
1592	__free_page(page);
1593}
1594
1595static inline void free_reserved_page(struct page *page)
1596{
1597	__free_reserved_page(page);
1598	adjust_managed_page_count(page, 1);
1599}
1600
1601static inline void mark_page_reserved(struct page *page)
1602{
1603	SetPageReserved(page);
1604	adjust_managed_page_count(page, -1);
1605}
1606
1607/*
1608 * Default method to free all the __init memory into the buddy system.
1609 * The freed pages will be poisoned with the pattern "poison" if that value
1610 * is within the range [0, UCHAR_MAX].
1611 * Returns the number of pages freed into the buddy system.
1612 */
1613static inline unsigned long free_initmem_default(int poison)
1614{
1615	extern char __init_begin[], __init_end[];
1616
1617	return free_reserved_area(&__init_begin, &__init_end,
1618				  poison, "unused kernel");
1619}
1620
1621static inline unsigned long get_num_physpages(void)
1622{
1623	int nid;
1624	unsigned long phys_pages = 0;
1625
1626	for_each_online_node(nid)
1627		phys_pages += node_present_pages(nid);
1628
1629	return phys_pages;
1630}
1631
1632#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1633/*
1634 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1635 * zones, allocate the backing mem_map and account for memory holes in a more
1636 * architecture independent manner. This is a substitute for creating the
1637 * zone_sizes[] and zholes_size[] arrays and passing them to
1638 * free_area_init_node()
1639 *
1640 * An architecture is expected to register the ranges of page frames backed
1641 * by physical memory with memblock_add[_node]() before calling
1642 * free_area_init_nodes(), passing in the PFN each zone ends at. In basic
1643 * usage, an architecture is expected to do something like
1644 *
1645 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1646 * 							 max_highmem_pfn};
1647 * for_each_valid_physical_page_range()
1648 * 	memblock_add_node(base, size, nid)
1649 * free_area_init_nodes(max_zone_pfns);
1650 *
1651 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1652 * registered physical page range.  Similarly
1653 * sparse_memory_present_with_active_regions() calls memory_present() for
1654 * each range when SPARSEMEM is enabled.
1655 *
1656 * See mm/page_alloc.c for more information on each function exposed by
1657 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1658 */
1659extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1660unsigned long node_map_pfn_alignment(void);
1661unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1662						unsigned long end_pfn);
1663extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1664						unsigned long end_pfn);
1665extern void get_pfn_range_for_nid(unsigned int nid,
1666			unsigned long *start_pfn, unsigned long *end_pfn);
1667extern unsigned long find_min_pfn_with_active_regions(void);
1668extern void free_bootmem_with_active_regions(int nid,
1669						unsigned long max_low_pfn);
1670extern void sparse_memory_present_with_active_regions(int nid);
1671
1672#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
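/*
 * Illustrative sketch (not part of the original header) expanding the
 * pseudo-code in the comment above.  The helper name, memory range and zone
 * limits are made up; a real architecture derives them from firmware data.
 */
#if 0	/* example only, not built; needs <linux/memblock.h> and <linux/sizes.h> */
static void __init example_arch_zone_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

	/* Register the PFN range backed by RAM on node 0. */
	memblock_add_node(0x00000000, SZ_256M, 0);

	/* Hand the per-zone upper bounds (in PFNs) to the core. */
	max_zone_pfns[ZONE_NORMAL] = PFN_DOWN(SZ_256M);
	free_area_init_nodes(max_zone_pfns);
}
#endif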
1673
1674#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1675    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1676static inline int __early_pfn_to_nid(unsigned long pfn)
1677{
1678	return 0;
1679}
1680#else
1681/* please see mm/page_alloc.c */
1682extern int __meminit early_pfn_to_nid(unsigned long pfn);
1683/* there is a per-arch backend function. */
1684extern int __meminit __early_pfn_to_nid(unsigned long pfn);
1685#endif
1686
1687extern void set_dma_reserve(unsigned long new_dma_reserve);
1688extern void memmap_init_zone(unsigned long, int, unsigned long,
1689				unsigned long, enum memmap_context);
1690extern void setup_per_zone_wmarks(void);
1691extern int __meminit init_per_zone_wmark_min(void);
1692extern void mem_init(void);
1693extern void __init mmap_init(void);
1694extern void show_mem(unsigned int flags);
1695extern void si_meminfo(struct sysinfo * val);
1696extern void si_meminfo_node(struct sysinfo *val, int nid);
1697
1698extern __printf(3, 4)
1699void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1700
1701extern void setup_per_cpu_pageset(void);
1702
1703extern void zone_pcp_update(struct zone *zone);
1704extern void zone_pcp_reset(struct zone *zone);
1705
1706/* page_alloc.c */
1707extern int min_free_kbytes;
1708
1709/* nommu.c */
1710extern atomic_long_t mmap_pages_allocated;
1711extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1712
1713/* interval_tree.c */
1714void vma_interval_tree_insert(struct vm_area_struct *node,
1715			      struct rb_root *root);
1716void vma_interval_tree_insert_after(struct vm_area_struct *node,
1717				    struct vm_area_struct *prev,
1718				    struct rb_root *root);
1719void vma_interval_tree_remove(struct vm_area_struct *node,
1720			      struct rb_root *root);
1721struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1722				unsigned long start, unsigned long last);
1723struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1724				unsigned long start, unsigned long last);
1725
1726#define vma_interval_tree_foreach(vma, root, start, last)		\
1727	for (vma = vma_interval_tree_iter_first(root, start, last);	\
1728	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
1729
1730static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1731					struct list_head *list)
1732{
1733	list_add_tail(&vma->shared.nonlinear, list);
1734}
1735
1736void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1737				   struct rb_root *root);
1738void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1739				   struct rb_root *root);
1740struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1741	struct rb_root *root, unsigned long start, unsigned long last);
 
1742struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1743	struct anon_vma_chain *node, unsigned long start, unsigned long last);
1744#ifdef CONFIG_DEBUG_VM_RB
1745void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1746#endif
1747
1748#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
1749	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1750	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1751
1752/* mmap.c */
1753extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1754extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1755	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1756extern struct vm_area_struct *vma_merge(struct mm_struct *,
1757	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1758	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1759	struct mempolicy *);
1760extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1761extern int split_vma(struct mm_struct *,
1762	struct vm_area_struct *, unsigned long addr, int new_below);
1763extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1764extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1765	struct rb_node **, struct rb_node *);
1766extern void unlink_file_vma(struct vm_area_struct *);
1767extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1768	unsigned long addr, unsigned long len, pgoff_t pgoff,
1769	bool *need_rmap_locks);
1770extern void exit_mmap(struct mm_struct *);
1771
1772extern int mm_take_all_locks(struct mm_struct *mm);
1773extern void mm_drop_all_locks(struct mm_struct *mm);
1774
1775extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
 
1776extern struct file *get_mm_exe_file(struct mm_struct *mm);
 
1777
1778extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
1779extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
1780				   unsigned long addr, unsigned long len,
1781				   unsigned long flags, struct page **pages);
1782extern int install_special_mapping(struct mm_struct *mm,
1783				   unsigned long addr, unsigned long len,
1784				   unsigned long flags, struct page **pages);
 
1785
1786extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1787
1788extern unsigned long mmap_region(struct file *file, unsigned long addr,
1789	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1790extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1791	unsigned long len, unsigned long prot, unsigned long flags,
1792	unsigned long pgoff, unsigned long *populate);
1793extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1794
1795#ifdef CONFIG_MMU
1796extern int __mm_populate(unsigned long addr, unsigned long len,
1797			 int ignore_errors);
1798static inline void mm_populate(unsigned long addr, unsigned long len)
1799{
1800	/* Ignore errors */
1801	(void) __mm_populate(addr, len, 1);
1802}
1803#else
1804static inline void mm_populate(unsigned long addr, unsigned long len) {}
1805#endif
1806
1807/* These take the mm semaphore themselves */
1808extern unsigned long vm_brk(unsigned long, unsigned long);
1809extern int vm_munmap(unsigned long, size_t);
1810extern unsigned long vm_mmap(struct file *, unsigned long,
1811        unsigned long, unsigned long,
1812        unsigned long, unsigned long);
1813
1814struct vm_unmapped_area_info {
1815#define VM_UNMAPPED_AREA_TOPDOWN 1
1816	unsigned long flags;
1817	unsigned long length;
1818	unsigned long low_limit;
1819	unsigned long high_limit;
1820	unsigned long align_mask;
1821	unsigned long align_offset;
 
1822};
1823
1824extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
1825extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
1826
1827/*
1828 * Search for an unmapped address range.
1829 *
1830 * We are looking for a range that:
1831 * - does not intersect with any VMA;
1832 * - is contained within the [low_limit, high_limit) interval;
1833 * - is at least the desired size.
1834 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
1835 */
1836static inline unsigned long
1837vm_unmapped_area(struct vm_unmapped_area_info *info)
1838{
1839	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
1840		return unmapped_area(info);
1841	else
1842		return unmapped_area_topdown(info);
1843}
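/*
 * Illustrative sketch (not part of the original header): how a bottom-up
 * search is typically expressed with vm_unmapped_area(); compare
 * arch_get_unmapped_area() in mm/mmap.c.  The helper name is made up.
 */
static inline unsigned long
example_bottom_up_area(unsigned long len, unsigned long low, unsigned long high)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;		/* bottom-up; set VM_UNMAPPED_AREA_TOPDOWN otherwise */
	info.length = len;
	info.low_limit = low;
	info.high_limit = high;
	info.align_mask = 0;	/* no additional alignment constraint */
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}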
1844
1845/* truncate.c */
1846extern void truncate_inode_pages(struct address_space *, loff_t);
1847extern void truncate_inode_pages_range(struct address_space *,
1848				       loff_t lstart, loff_t lend);
1849extern void truncate_inode_pages_final(struct address_space *);
1850
1851/* generic vm_area_ops exported for stackable file systems */
1852extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1853extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
1854extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
1855
1856/* mm/page-writeback.c */
1857int write_one_page(struct page *page, int wait);
1858void task_dirty_inc(struct task_struct *tsk);
1859
1860/* readahead.c */
1861#define VM_MAX_READAHEAD	128	/* kbytes */
1862#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
1863
1864int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1865			pgoff_t offset, unsigned long nr_to_read);
1866
1867void page_cache_sync_readahead(struct address_space *mapping,
1868			       struct file_ra_state *ra,
1869			       struct file *filp,
1870			       pgoff_t offset,
1871			       unsigned long size);
1872
1873void page_cache_async_readahead(struct address_space *mapping,
1874				struct file_ra_state *ra,
1875				struct file *filp,
1876				struct page *pg,
1877				pgoff_t offset,
1878				unsigned long size);
1879
1880unsigned long max_sane_readahead(unsigned long nr);
1881
 
1882/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
1883extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
 
1884
1885/* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
1886extern int expand_downwards(struct vm_area_struct *vma,
1887		unsigned long address);
1888#if VM_GROWSUP
1889extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1890#else
1891  #define expand_upwards(vma, address) do { } while (0)
1892#endif
1893
1894/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1895extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
1896extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
1897					     struct vm_area_struct **pprev);
1898
1899/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
1900   NULL if none.  Assume start_addr < end_addr. */
1901static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
1902{
1903	struct vm_area_struct * vma = find_vma(mm,start_addr);
 
1904
1905	if (vma && end_addr <= vma->vm_start)
1906		vma = NULL;
1907	return vma;
1908}
1909
1910static inline unsigned long vma_pages(struct vm_area_struct *vma)
1911{
1912	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1913}
1914
1915/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
1916static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1917				unsigned long vm_start, unsigned long vm_end)
1918{
1919	struct vm_area_struct *vma = find_vma(mm, vm_start);
1920
1921	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
1922		vma = NULL;
1923
1924	return vma;
1925}
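/*
 * Illustrative sketch (not part of the original header): find_vma() only
 * guarantees addr < vm_end, so a caller must still check vm_start to know
 * whether the address really falls inside the returned VMA.  The helper name
 * is made up.
 */
static inline int example_addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma = find_vma(mm, addr);

	return vma && addr >= vma->vm_start;
}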
1926
1927#ifdef CONFIG_MMU
1928pgprot_t vm_get_page_prot(unsigned long vm_flags);
 
1929#else
1930static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1931{
1932	return __pgprot(0);
1933}
1934#endif
1935
 
 
1936#ifdef CONFIG_NUMA_BALANCING
1937unsigned long change_prot_numa(struct vm_area_struct *vma,
1938			unsigned long start, unsigned long end);
1939#endif
1940
1941struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 
1942int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1943			unsigned long pfn, unsigned long size, pgprot_t);
 
 
1944int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
1945int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1946			unsigned long pfn);
1947int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1948			unsigned long pfn);
1949int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
1950
1951
1952struct page *follow_page_mask(struct vm_area_struct *vma,
1953			      unsigned long address, unsigned int foll_flags,
1954			      unsigned int *page_mask);
1955
1956static inline struct page *follow_page(struct vm_area_struct *vma,
1957		unsigned long address, unsigned int foll_flags)
1958{
1959	unsigned int unused_page_mask;
1960	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
1961}
1962
1963#define FOLL_WRITE	0x01	/* check pte is writable */
1964#define FOLL_TOUCH	0x02	/* mark page accessed */
1965#define FOLL_GET	0x04	/* do get_page on page */
1966#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
1967#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
1968#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
1969				 * and return without waiting upon it */
1970#define FOLL_MLOCK	0x40	/* mark page as mlocked */
1971#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
1972#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
1973#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
1974#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
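/*
 * Illustrative sketch (not part of the original header): combining the FOLL_*
 * flags above for follow_page().  The caller must hold mm->mmap_sem; the
 * helper name is made up.
 */
static inline struct page *example_peek_writable_page(struct vm_area_struct *vma,
						      unsigned long addr)
{
	/* Take a page reference and require a writable, recently-used mapping. */
	return follow_page(vma, addr, FOLL_GET | FOLL_WRITE | FOLL_TOUCH);
}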
1975
1976typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1977			void *data);
1978extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
1979			       unsigned long size, pte_fn_t fn, void *data);
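/*
 * Illustrative sketch (not part of the original header): a pte_fn_t callback
 * for apply_to_page_range() that counts present PTEs into a caller-supplied
 * counter passed via @data.  The names are made up.
 */
#if 0	/* example only, not built */
static int example_count_present_pte(pte_t *pte, pgtable_t token,
				     unsigned long addr, void *data)
{
	unsigned long *count = data;

	if (pte_present(*pte))
		(*count)++;
	return 0;	/* returning non-zero aborts the walk */
}
/* Typical call: apply_to_page_range(mm, start, size, example_count_present_pte, &count); */
#endif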
1980
1981#ifdef CONFIG_PROC_FS
1982void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
1983#else
1984static inline void vm_stat_account(struct mm_struct *mm,
1985			unsigned long flags, struct file *file, long pages)
1986{
1987	mm->total_vm += pages;
1988}
1989#endif /* CONFIG_PROC_FS */
1990
1991#ifdef CONFIG_DEBUG_PAGEALLOC
1992extern void kernel_map_pages(struct page *page, int numpages, int enable);
1993#ifdef CONFIG_HIBERNATION
1994extern bool kernel_page_present(struct page *page);
1995#endif /* CONFIG_HIBERNATION */
1996#else
1997static inline void
1998kernel_map_pages(struct page *page, int numpages, int enable) {}
1999#ifdef CONFIG_HIBERNATION
2000static inline bool kernel_page_present(struct page *page) { return true; }
2001#endif /* CONFIG_HIBERNATION */
2002#endif
2003
 
2004extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2005#ifdef	__HAVE_ARCH_GATE_AREA
2006int in_gate_area_no_mm(unsigned long addr);
2007int in_gate_area(struct mm_struct *mm, unsigned long addr);
2008#else
2009int in_gate_area_no_mm(unsigned long addr);
2010#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
2011#endif	/* __HAVE_ARCH_GATE_AREA */
2012
 
 
2013#ifdef CONFIG_SYSCTL
2014extern int sysctl_drop_caches;
2015int drop_caches_sysctl_handler(struct ctl_table *, int,
2016					void __user *, size_t *, loff_t *);
2017#endif
2018
2019unsigned long shrink_slab(struct shrink_control *shrink,
2020			  unsigned long nr_pages_scanned,
2021			  unsigned long lru_pages);
2022
2023#ifndef CONFIG_MMU
2024#define randomize_va_space 0
2025#else
2026extern int randomize_va_space;
2027#endif
2028
2029const char * arch_vma_name(struct vm_area_struct *vma);
 
2030void print_vma_addr(char *prefix, unsigned long rip);
2031
2032void sparse_mem_maps_populate_node(struct page **map_map,
2033				   unsigned long pnum_begin,
2034				   unsigned long pnum_end,
2035				   unsigned long map_count,
2036				   int nodeid);
2037
2038struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
2039pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2040pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
 
2041pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2042pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
 
2043void *vmemmap_alloc_block(unsigned long size, int node);
2044void *vmemmap_alloc_block_buf(unsigned long size, int node);
 
 
2045void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2046int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2047			       int node);
2048int vmemmap_populate(unsigned long start, unsigned long end, int node);
 
 
 
2049void vmemmap_populate_print_last(void);
2050#ifdef CONFIG_MEMORY_HOTPLUG
2051void vmemmap_free(unsigned long start, unsigned long end);
 
2052#endif
2053void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2054				  unsigned long size);
2055
2056enum mf_flags {
2057	MF_COUNT_INCREASED = 1 << 0,
2058	MF_ACTION_REQUIRED = 1 << 1,
2059	MF_MUST_KILL = 1 << 2,
2060	MF_SOFT_OFFLINE = 1 << 3,
2061};
2062extern int memory_failure(unsigned long pfn, int trapno, int flags);
2063extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
 
 
2064extern int unpoison_memory(unsigned long pfn);
2065extern int sysctl_memory_failure_early_kill;
2066extern int sysctl_memory_failure_recovery;
2067extern void shake_page(struct page *p, int access);
2068extern atomic_long_t num_poisoned_pages;
2069extern int soft_offline_page(struct page *page, int flags);
2070
2071#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2072extern void clear_huge_page(struct page *page,
2073			    unsigned long addr,
2074			    unsigned int pages_per_huge_page);
2075extern void copy_user_huge_page(struct page *dst, struct page *src,
2076				unsigned long addr, struct vm_area_struct *vma,
2077				unsigned int pages_per_huge_page);
2078#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 
2079
2080#ifdef CONFIG_DEBUG_PAGEALLOC
2081extern unsigned int _debug_guardpage_minorder;
 
 
 
2082
2083static inline unsigned int debug_guardpage_minorder(void)
2084{
2085	return _debug_guardpage_minorder;
2086}
2087
2088static inline bool page_is_guard(struct page *page)
2089{
2090	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
2091}
2092#else
2093static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2094static inline bool page_is_guard(struct page *page) { return false; }
2095#endif /* CONFIG_DEBUG_PAGEALLOC */
2096
2097#if MAX_NUMNODES > 1
2098void __init setup_nr_node_ids(void);
2099#else
2100static inline void setup_nr_node_ids(void) {}
2101#endif
2102
2103#endif /* __KERNEL__ */
2104#endif /* _LINUX_MM_H */
v6.13.7
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_MM_H
   3#define _LINUX_MM_H
   4
   5#include <linux/errno.h>
   6#include <linux/mmdebug.h>
   7#include <linux/gfp.h>
   8#include <linux/pgalloc_tag.h>
   9#include <linux/bug.h>
  10#include <linux/list.h>
  11#include <linux/mmzone.h>
  12#include <linux/rbtree.h>
  13#include <linux/atomic.h>
  14#include <linux/debug_locks.h>
  15#include <linux/mm_types.h>
  16#include <linux/mmap_lock.h>
  17#include <linux/range.h>
  18#include <linux/pfn.h>
  19#include <linux/percpu-refcount.h>
  20#include <linux/bit_spinlock.h>
  21#include <linux/shrinker.h>
  22#include <linux/resource.h>
  23#include <linux/page_ext.h>
  24#include <linux/err.h>
  25#include <linux/page-flags.h>
  26#include <linux/page_ref.h>
  27#include <linux/overflow.h>
  28#include <linux/sizes.h>
  29#include <linux/sched.h>
  30#include <linux/pgtable.h>
  31#include <linux/kasan.h>
  32#include <linux/memremap.h>
  33#include <linux/slab.h>
  34#include <linux/cacheinfo.h>
  35
  36struct mempolicy;
  37struct anon_vma;
  38struct anon_vma_chain;
 
  39struct user_struct;
  40struct pt_regs;
  41struct folio_batch;
  42
  43extern int sysctl_page_lock_unfairness;
  44
  45void mm_core_init(void);
  46void init_mm_internals(void);
  47
  48#ifndef CONFIG_NUMA		/* Don't use mapnrs, do it properly */
  49extern unsigned long max_mapnr;
  50
  51static inline void set_max_mapnr(unsigned long limit)
  52{
  53	max_mapnr = limit;
  54}
  55#else
  56static inline void set_max_mapnr(unsigned long limit) { }
  57#endif
  58
  59extern atomic_long_t _totalram_pages;
  60static inline unsigned long totalram_pages(void)
  61{
  62	return (unsigned long)atomic_long_read(&_totalram_pages);
  63}
  64
  65static inline void totalram_pages_inc(void)
  66{
  67	atomic_long_inc(&_totalram_pages);
  68}
  69
  70static inline void totalram_pages_dec(void)
  71{
  72	atomic_long_dec(&_totalram_pages);
  73}
  74
  75static inline void totalram_pages_add(long count)
  76{
  77	atomic_long_add(count, &_totalram_pages);
  78}
  79
  80extern void * high_memory;
  81extern int page_cluster;
  82extern const int page_cluster_max;
  83
  84#ifdef CONFIG_SYSCTL
  85extern int sysctl_legacy_va_layout;
  86#else
  87#define sysctl_legacy_va_layout 0
  88#endif
  89
  90#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
  91extern const int mmap_rnd_bits_min;
  92extern int mmap_rnd_bits_max __ro_after_init;
  93extern int mmap_rnd_bits __read_mostly;
  94#endif
  95#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
  96extern const int mmap_rnd_compat_bits_min;
  97extern const int mmap_rnd_compat_bits_max;
  98extern int mmap_rnd_compat_bits __read_mostly;
  99#endif
 100
 101#ifndef DIRECT_MAP_PHYSMEM_END
 102# ifdef MAX_PHYSMEM_BITS
 103# define DIRECT_MAP_PHYSMEM_END	((1ULL << MAX_PHYSMEM_BITS) - 1)
 104# else
 105# define DIRECT_MAP_PHYSMEM_END	(((phys_addr_t)-1)&~(1ULL<<63))
 106# endif
 107#endif
 108
 109#include <asm/page.h>
 
 110#include <asm/processor.h>
 111
 112#ifndef __pa_symbol
 113#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 114#endif
 115
 116#ifndef page_to_virt
 117#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
 118#endif
 119
 120#ifndef lm_alias
 121#define lm_alias(x)	__va(__pa_symbol(x))
 122#endif
 123
 124/*
125 * Used to prevent common memory management code from establishing
126 * a zero page mapping on a read fault.
 127 * This macro should be defined within <asm/pgtable.h>.
 128 * s390 does this to prevent multiplexing of hardware bits
 129 * related to the physical page in case of virtualization.
 130 */
 131#ifndef mm_forbids_zeropage
 132#define mm_forbids_zeropage(X)	(0)
 133#endif
 134
 135/*
 136 * On some architectures it is expensive to call memset() for small sizes.
 137 * If an architecture decides to implement their own version of
 138 * mm_zero_struct_page they should wrap the defines below in a #ifndef and
 139 * define their own version of this macro in <asm/pgtable.h>
 140 */
 141#if BITS_PER_LONG == 64
 142/* This function must be updated when the size of struct page grows above 96
143 * or shrinks below 56. The idea is that the compiler optimizes out the
144 * switch() statement and leaves only move/store instructions. The compiler can
145 * also combine write statements if they are both assignments and can be
146 * reordered; this can result in several of the writes here being dropped.
 147 */
 148#define	mm_zero_struct_page(pp) __mm_zero_struct_page(pp)
 149static inline void __mm_zero_struct_page(struct page *page)
 150{
 151	unsigned long *_pp = (void *)page;
 152
 153	 /* Check that struct page is either 56, 64, 72, 80, 88 or 96 bytes */
 154	BUILD_BUG_ON(sizeof(struct page) & 7);
 155	BUILD_BUG_ON(sizeof(struct page) < 56);
 156	BUILD_BUG_ON(sizeof(struct page) > 96);
 157
 158	switch (sizeof(struct page)) {
 159	case 96:
 160		_pp[11] = 0;
 161		fallthrough;
 162	case 88:
 163		_pp[10] = 0;
 164		fallthrough;
 165	case 80:
 166		_pp[9] = 0;
 167		fallthrough;
 168	case 72:
 169		_pp[8] = 0;
 170		fallthrough;
 171	case 64:
 172		_pp[7] = 0;
 173		fallthrough;
 174	case 56:
 175		_pp[6] = 0;
 176		_pp[5] = 0;
 177		_pp[4] = 0;
 178		_pp[3] = 0;
 179		_pp[2] = 0;
 180		_pp[1] = 0;
 181		_pp[0] = 0;
 182	}
 183}
 184#else
 185#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
 186#endif
 187
 188/*
 189 * Default maximum number of active map areas, this limits the number of vmas
190 * per mm struct. Users can override this number via sysctl, but there is a
 191 * problem.
 192 *
 193 * When a program's coredump is generated as ELF format, a section is created
 194 * per a vma. In ELF, the number of sections is represented in unsigned short.
 195 * This means the number of sections should be smaller than 65535 at coredump.
196 * Because the kernel adds some informative sections to the image of the program
197 * when generating a coredump, we need some margin. The number of extra sections is
 198 * 1-3 now and depends on arch. We use "5" as safe margin, here.
 199 *
 200 * ELF extended numbering allows more than 65535 sections, so 16-bit bound is
 201 * not a hard limit any more. Although some userspace tools can be surprised by
 202 * that.
 203 */
 204#define MAPCOUNT_ELF_CORE_MARGIN	(5)
 205#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 206
 207extern int sysctl_max_map_count;
 208
 209extern unsigned long sysctl_user_reserve_kbytes;
 210extern unsigned long sysctl_admin_reserve_kbytes;
 211
 212extern int sysctl_overcommit_memory;
 213extern int sysctl_overcommit_ratio;
 214extern unsigned long sysctl_overcommit_kbytes;
 215
 216int overcommit_ratio_handler(const struct ctl_table *, int, void *, size_t *,
 217		loff_t *);
 218int overcommit_kbytes_handler(const struct ctl_table *, int, void *, size_t *,
 219		loff_t *);
 220int overcommit_policy_handler(const struct ctl_table *, int, void *, size_t *,
 221		loff_t *);
 222
 223#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 224#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 225#define folio_page_idx(folio, p)	(page_to_pfn(p) - folio_pfn(folio))
 226#else
 227#define nth_page(page,n) ((page) + (n))
 228#define folio_page_idx(folio, p)	((p) - &(folio)->page)
 229#endif
 230
 231/* to align the pointer to the (next) page boundary */
 232#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
 233
 234/* to align the pointer to the (prev) page boundary */
 235#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
 236
 237/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 238#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 239
 240static inline struct folio *lru_to_folio(struct list_head *head)
 241{
 242	return list_entry((head)->prev, struct folio, lru);
 243}
 244
 245void setup_initial_init_mm(void *start_code, void *end_code,
 246			   void *end_data, void *brk);
 247
 248/*
 249 * Linux kernel virtual memory manager primitives.
 250 * The idea being to have a "virtual" mm in the same way
 251 * we have a virtual fs - giving a cleaner interface to the
 252 * mm details, and allowing different kinds of memory mappings
 253 * (from shared memory to executable loading to arbitrary
 254 * mmap() functions).
 255 */
 256
 257struct vm_area_struct *vm_area_alloc(struct mm_struct *);
 258struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
 259void vm_area_free(struct vm_area_struct *);
 260/* Use only if VMA has no other users */
 261void __vm_area_free(struct vm_area_struct *vma);
 262
 263#ifndef CONFIG_MMU
 264extern struct rb_root nommu_region_tree;
 265extern struct rw_semaphore nommu_region_sem;
 266
 267extern unsigned int kobjsize(const void *objp);
 268#endif
 269
 270/*
 271 * vm_flags in vm_area_struct, see mm_types.h.
 272 * When changing, update also include/trace/events/mmflags.h
 273 */
 274#define VM_NONE		0x00000000
 275
 276#define VM_READ		0x00000001	/* currently active flags */
 277#define VM_WRITE	0x00000002
 278#define VM_EXEC		0x00000004
 279#define VM_SHARED	0x00000008
 280
 281/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
 282#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
 283#define VM_MAYWRITE	0x00000020
 284#define VM_MAYEXEC	0x00000040
 285#define VM_MAYSHARE	0x00000080
 286
 287#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 288#ifdef CONFIG_MMU
 289#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
 290#else /* CONFIG_MMU */
 291#define VM_MAYOVERLAY	0x00000200	/* nommu: R/O MAP_PRIVATE mapping that might overlay a file mapping */
 292#define VM_UFFD_MISSING	0
 293#endif /* CONFIG_MMU */
 294#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 295#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
 296
 297#define VM_LOCKED	0x00002000
 298#define VM_IO           0x00004000	/* Memory mapped I/O or similar */
 299
 300					/* Used by sys_madvise() */
 301#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
 302#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
 303
 304#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
 305#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 306#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
 307#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 308#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 309#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 310#define VM_SYNC		0x00800000	/* Synchronous page faults */
 311#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
 312#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
 313#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
 314
 315#ifdef CONFIG_MEM_SOFT_DIRTY
 316# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
 317#else
 318# define VM_SOFTDIRTY	0
 319#endif
 320
 321#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 322#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 323#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
 324#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 325
 326#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
 327#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
 328#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
 329#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
 330#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
 331#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
 332#define VM_HIGH_ARCH_BIT_5	37	/* bit only usable on 64-bit architectures */
 333#define VM_HIGH_ARCH_BIT_6	38	/* bit only usable on 64-bit architectures */
 334#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
 335#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
 336#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
 337#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
 338#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
 339#define VM_HIGH_ARCH_5	BIT(VM_HIGH_ARCH_BIT_5)
 340#define VM_HIGH_ARCH_6	BIT(VM_HIGH_ARCH_BIT_6)
 341#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
 342
 343#ifdef CONFIG_ARCH_HAS_PKEYS
 344# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0
 345# define VM_PKEY_BIT0  VM_HIGH_ARCH_0
 346# define VM_PKEY_BIT1  VM_HIGH_ARCH_1
 347# define VM_PKEY_BIT2  VM_HIGH_ARCH_2
 348#if CONFIG_ARCH_PKEY_BITS > 3
 349# define VM_PKEY_BIT3  VM_HIGH_ARCH_3
 350#else
 351# define VM_PKEY_BIT3  0
 352#endif
 353#if CONFIG_ARCH_PKEY_BITS > 4
 354# define VM_PKEY_BIT4  VM_HIGH_ARCH_4
 355#else
 356# define VM_PKEY_BIT4  0
 357#endif
 358#endif /* CONFIG_ARCH_HAS_PKEYS */
 359
 360#ifdef CONFIG_X86_USER_SHADOW_STACK
 361/*
 362 * VM_SHADOW_STACK should not be set with VM_SHARED because of lack of
363 * support in core mm.
 364 *
 365 * These VMAs will get a single end guard page. This helps userspace protect
 366 * itself from attacks. A single page is enough for current shadow stack archs
 367 * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
 368 * for more details on the guard size.
 369 */
 370# define VM_SHADOW_STACK	VM_HIGH_ARCH_5
 371#endif
 372
 373#if defined(CONFIG_ARM64_GCS)
 374/*
 375 * arm64's Guarded Control Stack implements similar functionality and
 376 * has similar constraints to shadow stacks.
 377 */
 378# define VM_SHADOW_STACK	VM_HIGH_ARCH_6
 379#endif
 380
 381#ifndef VM_SHADOW_STACK
 382# define VM_SHADOW_STACK	VM_NONE
 383#endif
 384
 385#if defined(CONFIG_X86)
 386# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
 387#elif defined(CONFIG_PPC64)
 388# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 389#elif defined(CONFIG_PARISC)
 390# define VM_GROWSUP	VM_ARCH_1
 391#elif defined(CONFIG_SPARC64)
 392# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
 393# define VM_ARCH_CLEAR	VM_SPARC_ADI
 394#elif defined(CONFIG_ARM64)
 395# define VM_ARM64_BTI	VM_ARCH_1	/* BTI guarded page, a.k.a. GP bit */
 396# define VM_ARCH_CLEAR	VM_ARM64_BTI
 397#elif !defined(CONFIG_MMU)
 398# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 399#endif
 400
 401#if defined(CONFIG_ARM64_MTE)
 402# define VM_MTE		VM_HIGH_ARCH_4	/* Use Tagged memory for access control */
 403# define VM_MTE_ALLOWED	VM_HIGH_ARCH_5	/* Tagged memory permitted */
 404#else
 405# define VM_MTE		VM_NONE
 406# define VM_MTE_ALLOWED	VM_NONE
 407#endif
 408
 409#ifndef VM_GROWSUP
 410# define VM_GROWSUP	VM_NONE
 411#endif
 412
 413#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
 414# define VM_UFFD_MINOR_BIT	38
 415# define VM_UFFD_MINOR		BIT(VM_UFFD_MINOR_BIT)	/* UFFD minor faults */
 416#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 417# define VM_UFFD_MINOR		VM_NONE
 418#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
 419
 420/*
 421 * This flag is used to connect VFIO to arch specific KVM code. It
 422 * indicates that the memory under this VMA is safe for use with any
 423 * non-cachable memory type inside KVM. Some VFIO devices, on some
 424 * platforms, are thought to be unsafe and can cause machine crashes
 425 * if KVM does not lock down the memory type.
 426 */
 427#ifdef CONFIG_64BIT
 428#define VM_ALLOW_ANY_UNCACHED_BIT	39
 429#define VM_ALLOW_ANY_UNCACHED		BIT(VM_ALLOW_ANY_UNCACHED_BIT)
 430#else
 431#define VM_ALLOW_ANY_UNCACHED		VM_NONE
 432#endif
 433
 434#ifdef CONFIG_64BIT
 435#define VM_DROPPABLE_BIT	40
 436#define VM_DROPPABLE		BIT(VM_DROPPABLE_BIT)
 437#elif defined(CONFIG_PPC32)
 438#define VM_DROPPABLE		VM_ARCH_1
 439#else
 440#define VM_DROPPABLE		VM_NONE
 441#endif
 442
 443#ifdef CONFIG_64BIT
 444/* VM is sealed, in vm_flags */
 445#define VM_SEALED	_BITUL(63)
 446#endif
 447
 448/* Bits set in the VMA until the stack is in its final location */
 449#define VM_STACK_INCOMPLETE_SETUP (VM_RAND_READ | VM_SEQ_READ | VM_STACK_EARLY)
 450
 451#define TASK_EXEC ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0)
 452
 453/* Common data flag combinations */
 454#define VM_DATA_FLAGS_TSK_EXEC	(VM_READ | VM_WRITE | TASK_EXEC | \
 455				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 456#define VM_DATA_FLAGS_NON_EXEC	(VM_READ | VM_WRITE | VM_MAYREAD | \
 457				 VM_MAYWRITE | VM_MAYEXEC)
 458#define VM_DATA_FLAGS_EXEC	(VM_READ | VM_WRITE | VM_EXEC | \
 459				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
 460
 461#ifndef VM_DATA_DEFAULT_FLAGS		/* arch can override this */
 462#define VM_DATA_DEFAULT_FLAGS  VM_DATA_FLAGS_EXEC
 463#endif
 464
 465#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 466#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 467#endif
 468
 469#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
 470
 471#ifdef CONFIG_STACK_GROWSUP
 472#define VM_STACK	VM_GROWSUP
 473#define VM_STACK_EARLY	VM_GROWSDOWN
 474#else
 475#define VM_STACK	VM_GROWSDOWN
 476#define VM_STACK_EARLY	0
 477#endif
 478
 479#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 480
 481/* VMA basic access permission flags */
 482#define VM_ACCESS_FLAGS (VM_READ | VM_WRITE | VM_EXEC)
 483
 484
 485/*
486 * Special vmas that are non-mergeable and non-mlock()able.
 487 */
 488#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 489
 490/* This mask prevents VMA from being scanned with khugepaged */
 491#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
 492
 493/* This mask defines which mm->def_flags a process can inherit its parent */
 494#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
 495
 496/* This mask represents all the VMA flag bits used by mlock */
 497#define VM_LOCKED_MASK	(VM_LOCKED | VM_LOCKONFAULT)
 498
 499/* Arch-specific flags to clear when updating VM flags on protection change */
 500#ifndef VM_ARCH_CLEAR
 501# define VM_ARCH_CLEAR	VM_NONE
 502#endif
 503#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
 504
 505/*
 506 * mapping from the currently active vm_flags protection bits (the
 507 * low four bits) to a page protection mask..
 508 */
 
 509
 510/*
 511 * The default fault flags that should be used by most of the
 512 * arch-specific page fault handlers.
 513 */
 514#define FAULT_FLAG_DEFAULT  (FAULT_FLAG_ALLOW_RETRY | \
 515			     FAULT_FLAG_KILLABLE | \
 516			     FAULT_FLAG_INTERRUPTIBLE)
 517
 518/**
 519 * fault_flag_allow_retry_first - check ALLOW_RETRY the first time
 520 * @flags: Fault flags.
 521 *
522 * This is mostly used in places where we want to avoid holding
523 * the mmap_lock for too long while waiting for another condition
524 * to change, in which case we can be polite and release the
525 * mmap_lock on the first attempt to avoid potentially starving other
526 * processes that also want the mmap_lock.
 527 *
 528 * Return: true if the page fault allows retry and this is the first
 529 * attempt of the fault handling; false otherwise.
 530 */
 531static inline bool fault_flag_allow_retry_first(enum fault_flag flags)
 532{
 533	return (flags & FAULT_FLAG_ALLOW_RETRY) &&
 534	    (!(flags & FAULT_FLAG_TRIED));
 535}
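/*
 * Illustrative sketch (not part of the original header): how a ->fault()
 * handler might use fault_flag_allow_retry_first() to drop the lock on the
 * first attempt, as described above.  The handler and its outcome are made up.
 */
#if 0	/* example only, not built */
static vm_fault_t example_fault(struct vm_fault *vmf)
{
	if (fault_flag_allow_retry_first(vmf->flags) &&
	    !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
		/* Be polite on the first attempt: drop the lock and ask to retry. */
		release_fault_lock(vmf);
		return VM_FAULT_RETRY;
	}
	/* Second attempt (FAULT_FLAG_TRIED set): resolve with the lock held. */
	return VM_FAULT_SIGBUS;		/* placeholder outcome */
}
#endif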
 536
 537#define FAULT_FLAG_TRACE \
 538	{ FAULT_FLAG_WRITE,		"WRITE" }, \
 539	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
 540	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
 541	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
 542	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
 543	{ FAULT_FLAG_TRIED,		"TRIED" }, \
 544	{ FAULT_FLAG_USER,		"USER" }, \
 545	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
 546	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }, \
 547	{ FAULT_FLAG_INTERRUPTIBLE,	"INTERRUPTIBLE" }, \
 548	{ FAULT_FLAG_VMA_LOCK,		"VMA_LOCK" }
 549
 550/*
 551 * vm_fault is filled by the pagefault handler and passed to the vma's
 552 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 553 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 554 *
 555 * MM layer fills up gfp_mask for page allocations but fault handler might
 556 * alter it if its implementation requires a different allocation context.
 557 *
 558 * pgoff should be used in favour of virtual_address, if possible.
 559 */
 560struct vm_fault {
 561	const struct {
 562		struct vm_area_struct *vma;	/* Target VMA */
 563		gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 564		pgoff_t pgoff;			/* Logical page offset based on vma */
 565		unsigned long address;		/* Faulting virtual address - masked */
 566		unsigned long real_address;	/* Faulting virtual address - unmasked */
 567	};
 568	enum fault_flag flags;		/* FAULT_FLAG_xxx flags
 569					 * XXX: should really be 'const' */
 570	pmd_t *pmd;			/* Pointer to pmd entry matching
 571					 * the 'address' */
 572	pud_t *pud;			/* Pointer to pud entry matching
 573					 * the 'address'
 574					 */
 575	union {
 576		pte_t orig_pte;		/* Value of PTE at the time of fault */
 577		pmd_t orig_pmd;		/* Value of PMD at the time of fault,
 578					 * used by PMD fault only.
 579					 */
 580	};
 581
 582	struct page *cow_page;		/* Page handler may use for COW fault */
 583	struct page *page;		/* ->fault handlers should return a
 584					 * page here, unless VM_FAULT_NOPAGE
 585					 * is set (which is also implied by
 586					 * VM_FAULT_ERROR).
 587					 */
 588	/* These three entries are valid only while holding ptl lock */
 589	pte_t *pte;			/* Pointer to pte entry matching
 590					 * the 'address'. NULL if the page
 591					 * table hasn't been allocated.
 592					 */
 593	spinlock_t *ptl;		/* Page table lock.
 594					 * Protects pte page table if 'pte'
 595					 * is not NULL, otherwise pmd.
 596					 */
 597	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
 598					 * vm_ops->map_pages() sets up a page
 599					 * table from atomic context.
 600					 * do_fault_around() pre-allocates
 601					 * page table to avoid allocation from
 602					 * atomic context.
 603					 */
 604};
 605
 606/*
 607 * These are the virtual MM functions - opening of an area, closing and
 608 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 609 * to the functions called when a no-page or a wp-page exception occurs.
 610 */
 611struct vm_operations_struct {
 612	void (*open)(struct vm_area_struct * area);
 613	/**
 614	 * @close: Called when the VMA is being removed from the MM.
 615	 * Context: User context.  May sleep.  Caller holds mmap_lock.
 616	 */
 617	void (*close)(struct vm_area_struct * area);
 618	/* Called any time before splitting to check if it's allowed */
 619	int (*may_split)(struct vm_area_struct *area, unsigned long addr);
 620	int (*mremap)(struct vm_area_struct *area);
 621	/*
 622	 * Called by mprotect() to make driver-specific permission
 623	 * checks before mprotect() is finalised.   The VMA must not
 624	 * be modified.  Returns 0 if mprotect() can proceed.
 625	 */
 626	int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
 627			unsigned long end, unsigned long newflags);
 628	vm_fault_t (*fault)(struct vm_fault *vmf);
 629	vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
 630	vm_fault_t (*map_pages)(struct vm_fault *vmf,
 631			pgoff_t start_pgoff, pgoff_t end_pgoff);
 632	unsigned long (*pagesize)(struct vm_area_struct * area);
 633
 634	/* notification that a previously read-only page is about to become
 635	 * writable, if an error is returned it will cause a SIGBUS */
 636	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
 637
 638	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
 639	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
 640
 641	/* called by access_process_vm when get_user_pages() fails, typically
 642	 * for use by special VMAs. See also generic_access_phys() for a generic
 643	 * implementation useful for any iomem mapping.
 644	 */
 645	int (*access)(struct vm_area_struct *vma, unsigned long addr,
 646		      void *buf, int len, int write);
 647
 648	/* Called by the /proc/PID/maps code to ask the vma whether it
 649	 * has a special name.  Returning non-NULL will also cause this
 650	 * vma to be dumped unconditionally. */
 651	const char *(*name)(struct vm_area_struct *vma);
 652
 653#ifdef CONFIG_NUMA
 654	/*
 655	 * set_policy() op must add a reference to any non-NULL @new mempolicy
 656	 * to hold the policy upon return.  Caller should pass NULL @new to
 657	 * remove a policy and fall back to surrounding context--i.e. do not
 658	 * install a MPOL_DEFAULT policy, nor the task or system default
 659	 * mempolicy.
 660	 */
 661	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 662
 663	/*
 664	 * get_policy() op must add reference [mpol_get()] to any policy at
 665	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
 666	 * in mm/mempolicy.c will do this automatically.
 667	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
 668	 * marked as MPOL_SHARED. vma policies are protected by the mmap_lock.
 669	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
 670	 * must return NULL--i.e., do not "fallback" to task or system default
 671	 * policy.
 672	 */
 673	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 674					unsigned long addr, pgoff_t *ilx);
 675#endif
 676	/*
 677	 * Called by vm_normal_page() for special PTEs to find the
 678	 * page for @addr.  This is useful if the default behavior
 679	 * (using pte_page()) would not find the correct page.
 680	 */
 681	struct page *(*find_special_page)(struct vm_area_struct *vma,
 682					  unsigned long addr);
 683};
 684
 685#ifdef CONFIG_NUMA_BALANCING
 686static inline void vma_numab_state_init(struct vm_area_struct *vma)
 687{
 688	vma->numab_state = NULL;
 689}
 690static inline void vma_numab_state_free(struct vm_area_struct *vma)
 691{
 692	kfree(vma->numab_state);
 693}
 694#else
 695static inline void vma_numab_state_init(struct vm_area_struct *vma) {}
 696static inline void vma_numab_state_free(struct vm_area_struct *vma) {}
 697#endif /* CONFIG_NUMA_BALANCING */
 698
 699#ifdef CONFIG_PER_VMA_LOCK
 700/*
701 * Try to read-lock a vma. The function is allowed to occasionally yield a false
702 * locked result to avoid performance overhead, in which case we fall back to
703 * using mmap_lock. The function should never yield a false unlocked result.
 704 */
 705static inline bool vma_start_read(struct vm_area_struct *vma)
 706{
 707	/*
 708	 * Check before locking. A race might cause false locked result.
 709	 * We can use READ_ONCE() for the mm_lock_seq here, and don't need
 710	 * ACQUIRE semantics, because this is just a lockless check whose result
 711	 * we don't rely on for anything - the mm_lock_seq read against which we
 712	 * need ordering is below.
 713	 */
 714	if (READ_ONCE(vma->vm_lock_seq) == READ_ONCE(vma->vm_mm->mm_lock_seq))
 715		return false;
 716
 717	if (unlikely(down_read_trylock(&vma->vm_lock->lock) == 0))
 718		return false;
 719
 720	/*
 721	 * Overflow might produce false locked result.
 722	 * False unlocked result is impossible because we modify and check
 723	 * vma->vm_lock_seq under vma->vm_lock protection and mm->mm_lock_seq
 724	 * modification invalidates all existing locks.
 725	 *
 726	 * We must use ACQUIRE semantics for the mm_lock_seq so that if we are
 727	 * racing with vma_end_write_all(), we only start reading from the VMA
 728	 * after it has been unlocked.
 729	 * This pairs with RELEASE semantics in vma_end_write_all().
 730	 */
 731	if (unlikely(vma->vm_lock_seq == smp_load_acquire(&vma->vm_mm->mm_lock_seq))) {
 732		up_read(&vma->vm_lock->lock);
 733		return false;
 734	}
 735	return true;
 736}
 737
 738static inline void vma_end_read(struct vm_area_struct *vma)
 739{
 740	rcu_read_lock(); /* keeps vma alive till the end of up_read */
 741	up_read(&vma->vm_lock->lock);
 742	rcu_read_unlock();
 743}
 744
 745/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
 746static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
 747{
 748	mmap_assert_write_locked(vma->vm_mm);
 749
 750	/*
751	 * The current task is holding mmap_write_lock, so neither vma->vm_lock_seq
752	 * nor mm->mm_lock_seq can be concurrently modified.
 753	 */
 754	*mm_lock_seq = vma->vm_mm->mm_lock_seq;
 755	return (vma->vm_lock_seq == *mm_lock_seq);
 756}
 757
 758/*
 759 * Begin writing to a VMA.
 760 * Exclude concurrent readers under the per-VMA lock until the currently
 761 * write-locked mmap_lock is dropped or downgraded.
 762 */
 763static inline void vma_start_write(struct vm_area_struct *vma)
 764{
 765	int mm_lock_seq;
 766
 767	if (__is_vma_write_locked(vma, &mm_lock_seq))
 768		return;
 769
 770	down_write(&vma->vm_lock->lock);
 771	/*
 772	 * We should use WRITE_ONCE() here because we can have concurrent reads
 773	 * from the early lockless pessimistic check in vma_start_read().
 774	 * We don't really care about the correctness of that early check, but
 775	 * we should use WRITE_ONCE() for cleanliness and to keep KCSAN happy.
 776	 */
 777	WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
 778	up_write(&vma->vm_lock->lock);
 779}
 780
 781static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 782{
 783	int mm_lock_seq;
 784
 785	VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
 786}
 787
 788static inline void vma_assert_locked(struct vm_area_struct *vma)
 789{
 790	if (!rwsem_is_locked(&vma->vm_lock->lock))
 791		vma_assert_write_locked(vma);
 792}
 793
 794static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
 795{
796	/* When detaching, the vma should be write-locked */
 797	if (detached)
 798		vma_assert_write_locked(vma);
 799	vma->detached = detached;
 800}
 801
 802static inline void release_fault_lock(struct vm_fault *vmf)
 803{
 804	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
 805		vma_end_read(vmf->vma);
 806	else
 807		mmap_read_unlock(vmf->vma->vm_mm);
 808}
 809
 810static inline void assert_fault_locked(struct vm_fault *vmf)
 811{
 812	if (vmf->flags & FAULT_FLAG_VMA_LOCK)
 813		vma_assert_locked(vmf->vma);
 814	else
 815		mmap_assert_locked(vmf->vma->vm_mm);
 816}
 817
 818struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 819					  unsigned long address);
 820
 821#else /* CONFIG_PER_VMA_LOCK */
 822
 823static inline bool vma_start_read(struct vm_area_struct *vma)
 824		{ return false; }
 825static inline void vma_end_read(struct vm_area_struct *vma) {}
 826static inline void vma_start_write(struct vm_area_struct *vma) {}
 827static inline void vma_assert_write_locked(struct vm_area_struct *vma)
 828		{ mmap_assert_write_locked(vma->vm_mm); }
 829static inline void vma_mark_detached(struct vm_area_struct *vma,
 830				     bool detached) {}
 831
 832static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
 833		unsigned long address)
 834{
 835	return NULL;
 836}
 837
 838static inline void vma_assert_locked(struct vm_area_struct *vma)
 839{
 840	mmap_assert_locked(vma->vm_mm);
 841}
 842
 843static inline void release_fault_lock(struct vm_fault *vmf)
 844{
 845	mmap_read_unlock(vmf->vma->vm_mm);
 846}
 847
 848static inline void assert_fault_locked(struct vm_fault *vmf)
 849{
 850	mmap_assert_locked(vmf->vma->vm_mm);
 851}
 852
 853#endif /* CONFIG_PER_VMA_LOCK */
 854
 855extern const struct vm_operations_struct vma_dummy_vm_ops;
 856
 857/*
 858 * WARNING: vma_init does not initialize vma->vm_lock.
 859 * Use vm_area_alloc()/vm_area_free() if vma needs locking.
 860 */
 861static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
 862{
 863	memset(vma, 0, sizeof(*vma));
 864	vma->vm_mm = mm;
 865	vma->vm_ops = &vma_dummy_vm_ops;
 866	INIT_LIST_HEAD(&vma->anon_vma_chain);
 867	vma_mark_detached(vma, false);
 868	vma_numab_state_init(vma);
 869}
 870
 871/* Use when VMA is not part of the VMA tree and needs no locking */
 872static inline void vm_flags_init(struct vm_area_struct *vma,
 873				 vm_flags_t flags)
 874{
 875	ACCESS_PRIVATE(vma, __vm_flags) = flags;
 876}
 877
 878/*
 879 * Use when VMA is part of the VMA tree and modifications need coordination.
 880 * Note: vm_flags_reset() and vm_flags_reset_once() do not lock the vma;
 881 * the vma must be write-locked explicitly beforehand.
 882 */
 883static inline void vm_flags_reset(struct vm_area_struct *vma,
 884				  vm_flags_t flags)
 885{
 886	vma_assert_write_locked(vma);
 887	vm_flags_init(vma, flags);
 888}
 889
 890static inline void vm_flags_reset_once(struct vm_area_struct *vma,
 891				       vm_flags_t flags)
 892{
 893	vma_assert_write_locked(vma);
 894	WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
 895}
 896
 897static inline void vm_flags_set(struct vm_area_struct *vma,
 898				vm_flags_t flags)
 899{
 900	vma_start_write(vma);
 901	ACCESS_PRIVATE(vma, __vm_flags) |= flags;
 902}
 903
 904static inline void vm_flags_clear(struct vm_area_struct *vma,
 905				  vm_flags_t flags)
 906{
 907	vma_start_write(vma);
 908	ACCESS_PRIVATE(vma, __vm_flags) &= ~flags;
 909}
 910
 911/*
 912 * Use only if VMA is not part of the VMA tree or has no other users and
 913 * therefore needs no locking.
 914 */
 915static inline void __vm_flags_mod(struct vm_area_struct *vma,
 916				  vm_flags_t set, vm_flags_t clear)
 917{
 918	vm_flags_init(vma, (vma->vm_flags | set) & ~clear);
 919}
 920
 921/*
 922 * Use only when the order of set/clear operations is unimportant, otherwise
 923 * use vm_flags_{set|clear} explicitly.
 924 */
 925static inline void vm_flags_mod(struct vm_area_struct *vma,
 926				vm_flags_t set, vm_flags_t clear)
 927{
 928	vma_start_write(vma);
 929	__vm_flags_mod(vma, set, clear);
 930}
 931
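/*
 * Illustrative summary of the vm_flags helpers above (a sketch, not a rule
 * enforced by this header; new_flags is a placeholder value):
 *
 *	vm_flags_init(vma, VM_READ | VM_WRITE);	// new VMA, not yet in the tree
 *	vm_flags_set(vma, VM_LOCKED);		// tree member: write-locks the vma
 *	vm_flags_clear(vma, VM_LOCKED);		// tree member: write-locks the vma
 *	vm_flags_mod(vma, VM_READ, VM_WRITE);	// set and clear under one write lock
 *	vm_flags_reset(vma, new_flags);		// caller already holds the vma write lock
 */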
 932static inline void vma_set_anonymous(struct vm_area_struct *vma)
 933{
 934	vma->vm_ops = NULL;
 935}
 936
 937static inline bool vma_is_anonymous(struct vm_area_struct *vma)
 938{
 939	return !vma->vm_ops;
 940}
 941
 942/*
 943 * Indicate if the VMA is a heap for the given task; for
 944 * /proc/PID/maps that is the heap of the main task.
 945 */
 946static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
 947{
 948	return vma->vm_start < vma->vm_mm->brk &&
 949		vma->vm_end > vma->vm_mm->start_brk;
 950}
 951
 952/*
 953 * Indicate if the VMA is a stack for the given task; for
 954 * /proc/PID/maps that is the stack of the main task.
 955 */
 956static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
 957{
 958	/*
 959	 * We make no effort to guess what a given thread considers to be
 960	 * its "stack".  It's not even well-defined for programs written in
 961	 * languages like Go.
 962	 */
 963	return vma->vm_start <= vma->vm_mm->start_stack &&
 964		vma->vm_end >= vma->vm_mm->start_stack;
 965}
 966
 967static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
 968{
 969	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
 970
 971	if (!maybe_stack)
 972		return false;
 973
 974	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
 975						VM_STACK_INCOMPLETE_SETUP)
 976		return true;
 977
 978	return false;
 979}
 980
 981static inline bool vma_is_foreign(struct vm_area_struct *vma)
 982{
 983	if (!current->mm)
 984		return true;
 985
 986	if (current->mm != vma->vm_mm)
 987		return true;
 988
 989	return false;
 990}
 991
 992static inline bool vma_is_accessible(struct vm_area_struct *vma)
 993{
 994	return vma->vm_flags & VM_ACCESS_FLAGS;
 995}
 996
 997static inline bool is_shared_maywrite(vm_flags_t vm_flags)
 998{
 999	return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
1000		(VM_SHARED | VM_MAYWRITE);
1001}
1002
1003static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
1004{
1005	return is_shared_maywrite(vma->vm_flags);
1006}
1007
1008static inline
1009struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
1010{
1011	return mas_find(&vmi->mas, max - 1);
1012}
1013
1014static inline struct vm_area_struct *vma_next(struct vma_iterator *vmi)
1015{
1016	/*
1017	 * Uses mas_find() to get the first VMA when the iterator starts.
1018	 * Calling mas_next() could skip the first entry.
1019	 */
1020	return mas_find(&vmi->mas, ULONG_MAX);
1021}
1022
1023static inline
1024struct vm_area_struct *vma_iter_next_range(struct vma_iterator *vmi)
1025{
1026	return mas_next_range(&vmi->mas, ULONG_MAX);
1027}
1028
1029
1030static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi)
1031{
1032	return mas_prev(&vmi->mas, 0);
1033}
1034
1035static inline int vma_iter_clear_gfp(struct vma_iterator *vmi,
1036			unsigned long start, unsigned long end, gfp_t gfp)
1037{
1038	__mas_set_range(&vmi->mas, start, end - 1);
1039	mas_store_gfp(&vmi->mas, NULL, gfp);
1040	if (unlikely(mas_is_err(&vmi->mas)))
1041		return -ENOMEM;
1042
1043	return 0;
1044}
1045
1046/* Free any unused preallocations */
1047static inline void vma_iter_free(struct vma_iterator *vmi)
1048{
1049	mas_destroy(&vmi->mas);
1050}
1051
1052static inline int vma_iter_bulk_store(struct vma_iterator *vmi,
1053				      struct vm_area_struct *vma)
1054{
1055	vmi->mas.index = vma->vm_start;
1056	vmi->mas.last = vma->vm_end - 1;
1057	mas_store(&vmi->mas, vma);
1058	if (unlikely(mas_is_err(&vmi->mas)))
1059		return -ENOMEM;
1060
1061	return 0;
1062}
1063
1064static inline void vma_iter_invalidate(struct vma_iterator *vmi)
1065{
1066	mas_pause(&vmi->mas);
1067}
1068
1069static inline void vma_iter_set(struct vma_iterator *vmi, unsigned long addr)
1070{
1071	mas_set(&vmi->mas, addr);
1072}
1073
1074#define for_each_vma(__vmi, __vma)					\
1075	while (((__vma) = vma_next(&(__vmi))) != NULL)
1076
1077/* The MM code likes to work with exclusive end addresses */
1078#define for_each_vma_range(__vmi, __vma, __end)				\
1079	while (((__vma) = vma_find(&(__vmi), (__end))) != NULL)
1080
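/*
 * Typical iteration over a process's VMAs with the helpers above (an
 * illustrative sketch; mm is the caller's mm_struct and the caller must hold
 * at least the mmap read lock):
 *
 *	struct vm_area_struct *vma;
 *	VMA_ITERATOR(vmi, mm, 0);
 *
 *	mmap_read_lock(mm);
 *	for_each_vma(vmi, vma)
 *		pr_info("vma %lx-%lx\n", vma->vm_start, vma->vm_end);
 *	mmap_read_unlock(mm);
 */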
1081#ifdef CONFIG_SHMEM
1082/*
1083 * vma_is_shmem() is not inline because it is used only by slow
1084 * paths in userfault.
1085 */
1086bool vma_is_shmem(struct vm_area_struct *vma);
1087bool vma_is_anon_shmem(struct vm_area_struct *vma);
1088#else
1089static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1090static inline bool vma_is_anon_shmem(struct vm_area_struct *vma) { return false; }
1091#endif
1092
1093int vma_is_stack_for_current(struct vm_area_struct *vma);
1094
1095/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
1096#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
1097
1098struct mmu_gather;
1099struct inode;
1100
1101/*
1102 * compound_order() can be called without holding a reference, which means
1103 * that niceties like page_folio() don't work.  These callers should be
1104 * prepared to handle wild return values.  For example, PG_head may be
1105 * set before the order is initialised, or this may be a tail page.
1106 * See compaction.c for some good examples.
1107 */
1108static inline unsigned int compound_order(struct page *page)
1109{
1110	struct folio *folio = (struct folio *)page;
1111
1112	if (!test_bit(PG_head, &folio->flags))
1113		return 0;
1114	return folio->_flags_1 & 0xff;
1115}
1116
1117/**
1118 * folio_order - The allocation order of a folio.
1119 * @folio: The folio.
1120 *
1121 * A folio is composed of 2^order pages.  See get_order() for the definition
1122 * of order.
1123 *
1124 * Return: The order of the folio.
1125 */
1126static inline unsigned int folio_order(const struct folio *folio)
1127{
1128	if (!folio_test_large(folio))
1129		return 0;
1130	return folio->_flags_1 & 0xff;
1131}
1132
1133#include <linux/huge_mm.h>
1134
1135/*
1136 * Methods to modify the page usage count.
1137 *
1138 * What counts for a page usage:
1139 * - cache mapping   (page->mapping)
1140 * - private data    (page->private)
1141 * - page mapped in a task's page tables, each mapping
1142 *   is counted separately
1143 *
1144 * Also, many kernel routines increase the page count before a critical
1145 * routine so they can be sure the page doesn't go away from under them.
1146 */
1147
1148/*
1149 * Drop a ref, return true if the refcount fell to zero (the page has no users)
1150 */
1151static inline int put_page_testzero(struct page *page)
1152{
1153	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
1154	return page_ref_dec_and_test(page);
1155}
1156
1157static inline int folio_put_testzero(struct folio *folio)
1158{
1159	return put_page_testzero(&folio->page);
1160}
1161
1162/*
1163 * Try to grab a ref unless the page has a refcount of zero, return false if
1164 * that is the case.
1165 * This can be called when MMU is off so it must not access
1166 * any of the virtual mappings.
1167 */
1168static inline bool get_page_unless_zero(struct page *page)
1169{
1170	return page_ref_add_unless(page, 1, 0);
1171}
1172
1173static inline struct folio *folio_get_nontail_page(struct page *page)
1174{
1175	if (unlikely(!get_page_unless_zero(page)))
1176		return NULL;
1177	return (struct folio *)page;
1178}
1179
1180extern int page_is_ram(unsigned long pfn);
1181
1182enum {
1183	REGION_INTERSECTS,
1184	REGION_DISJOINT,
1185	REGION_MIXED,
1186};
1187
1188int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
1189		      unsigned long desc);
1190
1191/* Support for virtually mapped pages */
1192struct page *vmalloc_to_page(const void *addr);
1193unsigned long vmalloc_to_pfn(const void *addr);
1194
1195/*
1196 * Determine if an address is within the vmalloc range
1197 *
1198 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
1199 * is no special casing required.
1200 */
1201#ifdef CONFIG_MMU
1202extern bool is_vmalloc_addr(const void *x);
1203extern int is_vmalloc_or_module_addr(const void *x);
1204#else
1205static inline bool is_vmalloc_addr(const void *x)
1206{
1207	return false;
1208}
1209static inline int is_vmalloc_or_module_addr(const void *x)
1210{
1211	return 0;
1212}
1213#endif
1214
1215/*
1216 * How many times the entire folio is mapped as a single unit (eg by a
1217 * PMD or PUD entry).  This is probably not what you want, except for
1218 * debugging purposes or implementation of other core folio_*() primitives.
1219 */
1220static inline int folio_entire_mapcount(const struct folio *folio)
1221{
1222	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
1223	return atomic_read(&folio->_entire_mapcount) + 1;
1224}
1225
1226static inline int folio_large_mapcount(const struct folio *folio)
1227{
1228	VM_WARN_ON_FOLIO(!folio_test_large(folio), folio);
1229	return atomic_read(&folio->_large_mapcount) + 1;
1230}
1231
1232/**
1233 * folio_mapcount() - Number of mappings of this folio.
1234 * @folio: The folio.
1235 *
1236 * The folio mapcount corresponds to the number of present user page table
1237 * entries that reference any part of a folio. Each such present user page
1238 * table entry must be paired with exactly one folio reference.
1239 *
1240 * For ordinary folios, each user page table entry (PTE/PMD/PUD/...) counts
1241 * exactly once.
1242 *
1243 * For hugetlb folios, each abstracted "hugetlb" user page table entry that
1244 * references the entire folio counts exactly once, even when such special
1245 * page table entries are comprised of multiple ordinary page table entries.
1246 *
1247 * Will report 0 for pages which cannot be mapped into userspace, such as
1248 * slab, page tables and similar.
1249 *
1250 * Return: The number of times this folio is mapped.
1251 */
1252static inline int folio_mapcount(const struct folio *folio)
1253{
1254	int mapcount;
1255
1256	if (likely(!folio_test_large(folio))) {
1257		mapcount = atomic_read(&folio->_mapcount) + 1;
1258		if (page_mapcount_is_type(mapcount))
1259			mapcount = 0;
1260		return mapcount;
1261	}
1262	return folio_large_mapcount(folio);
1263}
1264
1265/**
1266 * folio_mapped - Is this folio mapped into userspace?
1267 * @folio: The folio.
1268 *
1269 * Return: True if any page in this folio is referenced by user page tables.
1270 */
1271static inline bool folio_mapped(const struct folio *folio)
1272{
1273	return folio_mapcount(folio) >= 1;
1274}
1275
1276/*
1277 * Return true if this page is mapped into pagetables.
1278 * For a compound page it returns true if any sub-page is mapped, even if
1279 * this particular sub-page is not itself mapped by any PTE or PMD.
1280 */
1281static inline bool page_mapped(const struct page *page)
1282{
1283	return folio_mapped(page_folio(page));
1284}
1285
1286static inline struct page *virt_to_head_page(const void *x)
1287{
1288	struct page *page = virt_to_page(x);
1289
1290	return compound_head(page);
1291}
1292
1293static inline struct folio *virt_to_folio(const void *x)
1294{
1295	struct page *page = virt_to_page(x);
1296
1297	return page_folio(page);
1298}
1299
1300void __folio_put(struct folio *folio);
1301
1302void split_page(struct page *page, unsigned int order);
1303void folio_copy(struct folio *dst, struct folio *src);
1304int folio_mc_copy(struct folio *dst, struct folio *src);
1305
1306unsigned long nr_free_buffer_pages(void);
1307
1308/* Returns the number of bytes in this potentially compound page. */
1309static inline unsigned long page_size(struct page *page)
1310{
1311	return PAGE_SIZE << compound_order(page);
1312}
1313
1314/* Returns the number of bits needed for the number of bytes in a page */
1315static inline unsigned int page_shift(struct page *page)
1316{
1317	return PAGE_SHIFT + compound_order(page);
1318}
1319
1320/**
1321 * thp_order - Order of a transparent huge page.
1322 * @page: Head page of a transparent huge page.
1323 */
1324static inline unsigned int thp_order(struct page *page)
1325{
1326	VM_BUG_ON_PGFLAGS(PageTail(page), page);
1327	return compound_order(page);
1328}
1329
1330/**
1331 * thp_size - Size of a transparent huge page.
1332 * @page: Head page of a transparent huge page.
1333 *
1334 * Return: Number of bytes in this page.
1335 */
1336static inline unsigned long thp_size(struct page *page)
1337{
1338	return PAGE_SIZE << thp_order(page);
1339}
1340
1341#ifdef CONFIG_MMU
1342/*
1343 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1344 * servicing faults for write access.  In the normal case, we do always want
1345 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1346 * that do not have writing enabled, when used by access_process_vm.
1347 */
1348static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1349{
1350	if (likely(vma->vm_flags & VM_WRITE))
1351		pte = pte_mkwrite(pte, vma);
1352	return pte;
1353}
1354
1355vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
1356void set_pte_range(struct vm_fault *vmf, struct folio *folio,
1357		struct page *page, unsigned int nr, unsigned long addr);
1358
1359vm_fault_t finish_fault(struct vm_fault *vmf);
1360#endif
1361
1362/*
1363 * Multiple processes may "see" the same page. E.g. for untouched
1364 * mappings of /dev/null, all processes see the same page full of
1365 * zeroes, and text pages of executables and shared libraries have
1366 * only one copy in memory, at most, normally.
1367 *
1368 * For the non-reserved pages, page_count(page) denotes a reference count.
1369 *   page_count() == 0 means the page is free. page->lru is then used for
1370 *   freelist management in the buddy allocator.
1371 *   page_count() > 0  means the page has been allocated.
1372 *
1373 * Pages are allocated by the slab allocator in order to provide memory
1374 * to kmalloc and kmem_cache_alloc. In this case, the management of the
1375 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
1376 * unless a particular usage is carefully commented. (the responsibility of
1377 * freeing the kmalloc memory is the caller's, of course).
1378 *
1379 * A page may be used by anyone else who does a __get_free_page().
1380 * In this case, page_count still tracks the references, and should only
1381 * be used through the normal accessor functions. The top bits of page->flags
1382 * and page->virtual store page management information, but all other fields
1383 * are unused and could be used privately, carefully. The management of this
1384 * page is the responsibility of the one who allocated it, and those who have
1385 * subsequently been given references to it.
1386 *
1387 * The other pages (we may call them "pagecache pages") are completely
1388 * managed by the Linux memory manager: I/O, buffers, swapping etc.
1389 * The following discussion applies only to them.
1390 *
1391 * A pagecache page contains an opaque `private' member, which belongs to the
1392 * page's address_space. Usually, this is the address of a circular list of
1393 * the page's disk buffers. PG_private must be set to tell the VM to call
1394 * into the filesystem to release these pages.
1395 *
1396 * A page may belong to an inode's memory mapping. In this case, page->mapping
1397 * is the pointer to the inode, and page->index is the file offset of the page,
1398 * in units of PAGE_SIZE.
1399 *
1400 * If pagecache pages are not associated with an inode, they are said to be
1401 * anonymous pages. These may become associated with the swapcache, and in that
1402 * case PG_swapcache is set, and page->private is an offset into the swapcache.
1403 *
1404 * In either case (swapcache or inode backed), the pagecache itself holds one
1405 * reference to the page. Setting PG_private should also increment the
1406 * refcount. Each user mapping also has a reference to the page.
1407 *
1408 * The pagecache pages are stored in a per-mapping radix tree, which is
1409 * rooted at mapping->i_pages, and indexed by offset.
1410 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
1411 * lists, we instead now tag pages as dirty/writeback in the radix tree.
1412 *
1413 * All pagecache pages may be subject to I/O:
1414 * - inode pages may need to be read from disk,
1415 * - inode pages which have been modified and are MAP_SHARED may need
1416 *   to be written back to the inode on disk,
1417 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
1418 *   modified may need to be swapped out to swap space and (later) to be read
1419 *   back into memory.
1420 */
1421
1422#if defined(CONFIG_ZONE_DEVICE) && defined(CONFIG_FS_DAX)
1423DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
1424
1425bool __put_devmap_managed_folio_refs(struct folio *folio, int refs);
1426static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1427{
1428	if (!static_branch_unlikely(&devmap_managed_key))
1429		return false;
1430	if (!folio_is_zone_device(folio))
1431		return false;
1432	return __put_devmap_managed_folio_refs(folio, refs);
1433}
1434#else /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
1435static inline bool put_devmap_managed_folio_refs(struct folio *folio, int refs)
1436{
1437	return false;
1438}
1439#endif /* CONFIG_ZONE_DEVICE && CONFIG_FS_DAX */
1440
1441/* 127: arbitrary random number, small enough to assemble well */
1442#define folio_ref_zero_or_close_to_overflow(folio) \
1443	((unsigned int) folio_ref_count(folio) + 127u <= 127u)
1444
1445/**
1446 * folio_get - Increment the reference count on a folio.
1447 * @folio: The folio.
1448 *
1449 * Context: May be called in any context, as long as you know that
1450 * you have a refcount on the folio.  If you do not already have one,
1451 * folio_try_get() may be the right interface for you to use.
1452 */
1453static inline void folio_get(struct folio *folio)
1454{
1455	VM_BUG_ON_FOLIO(folio_ref_zero_or_close_to_overflow(folio), folio);
1456	folio_ref_inc(folio);
1457}
1458
1459static inline void get_page(struct page *page)
1460{
1461	folio_get(page_folio(page));
1462}
1463
1464static inline __must_check bool try_get_page(struct page *page)
1465{
1466	page = compound_head(page);
1467	if (WARN_ON_ONCE(page_ref_count(page) <= 0))
1468		return false;
1469	page_ref_inc(page);
1470	return true;
1471}
1472
1473/**
1474 * folio_put - Decrement the reference count on a folio.
1475 * @folio: The folio.
1476 *
1477 * If the folio's reference count reaches zero, the memory will be
1478 * released back to the page allocator and may be used by another
1479 * allocation immediately.  Do not access the memory or the struct folio
1480 * after calling folio_put() unless you can be sure that it wasn't the
1481 * last reference.
1482 *
1483 * Context: May be called in process or interrupt context, but not in NMI
1484 * context.  May be called while holding a spinlock.
1485 */
1486static inline void folio_put(struct folio *folio)
1487{
1488	if (folio_put_testzero(folio))
1489		__folio_put(folio);
1490}
1491
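/*
 * A minimal usage sketch of the refcounting helpers above: take a reference
 * before touching the folio's memory, drop it when done.
 *
 *	struct folio *folio = page_folio(page);
 *
 *	folio_get(folio);	// caller must already hold one reference
 *	// ... access the folio's memory ...
 *	folio_put(folio);	// may free the folio if this was the last reference
 */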
1492/**
1493 * folio_put_refs - Reduce the reference count on a folio.
1494 * @folio: The folio.
1495 * @refs: The amount to subtract from the folio's reference count.
1496 *
1497 * If the folio's reference count reaches zero, the memory will be
1498 * released back to the page allocator and may be used by another
1499 * allocation immediately.  Do not access the memory or the struct folio
1500 * after calling folio_put_refs() unless you can be sure that these weren't
1501 * the last references.
1502 *
1503 * Context: May be called in process or interrupt context, but not in NMI
1504 * context.  May be called while holding a spinlock.
1505 */
1506static inline void folio_put_refs(struct folio *folio, int refs)
1507{
1508	if (folio_ref_sub_and_test(folio, refs))
1509		__folio_put(folio);
1510}
1511
1512void folios_put_refs(struct folio_batch *folios, unsigned int *refs);
1513
1514/*
1515 * union release_pages_arg - an array of pages or folios
1516 *
1517 * release_pages() releases a simple array of multiple pages, and
1518 * accepts various different forms of said page array: either
1519 * a regular old boring array of pages, an array of folios, or
1520 * an array of encoded page pointers.
1521 *
1522 * The transparent union syntax for this kind of "any of these
1523 * argument types" is all kinds of ugly, so look away.
1524 */
1525typedef union {
1526	struct page **pages;
1527	struct folio **folios;
1528	struct encoded_page **encoded_pages;
1529} release_pages_arg __attribute__ ((__transparent_union__));
1530
1531void release_pages(release_pages_arg, int nr);
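/*
 * Because of the transparent union, the same call site accepts either array
 * type (an illustrative sketch; the arrays are placeholders):
 *
 *	struct page *pages[16];
 *	struct folio *folios[16];
 *	// ... fill the arrays, each entry holding a reference ...
 *	release_pages(pages, 16);
 *	release_pages(folios, 16);
 */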
1532
1533/**
1534 * folios_put - Decrement the reference count on an array of folios.
1535 * @folios: The folios.
1536 *
1537 * Like folio_put(), but for a batch of folios.  This is more efficient
1538 * than writing the loop yourself as it will optimise the locks which need
1539 * to be taken if the folios are freed.  The folios batch is returned
1540 * empty and ready to be reused for another batch; there is no need to
1541 * reinitialise it.
1542 *
1543 * Context: May be called in process or interrupt context, but not in NMI
1544 * context.  May be called while holding a spinlock.
1545 */
1546static inline void folios_put(struct folio_batch *folios)
1547{
1548	folios_put_refs(folios, NULL);
1549}
1550
1551static inline void put_page(struct page *page)
1552{
1553	struct folio *folio = page_folio(page);
1554
1555	/*
1556	 * For some devmap managed pages we need to catch refcount transition
1557	 * from 2 to 1:
1558	 */
1559	if (put_devmap_managed_folio_refs(folio, 1))
1560		return;
1561	folio_put(folio);
1562}
1563
1564/*
1565 * GUP_PIN_COUNTING_BIAS, and the associated functions that use it, overload
1566 * the page's refcount so that two separate items are tracked: the original page
1567 * reference count, and also a new count of how many pin_user_pages() calls were
1568 * made against the page. ("gup-pinned" is another term for the latter).
1569 *
1570 * With this scheme, pin_user_pages() becomes special: such pages are marked as
1571 * distinct from normal pages. As such, the unpin_user_page() call (and its
1572 * variants) must be used in order to release gup-pinned pages.
1573 *
1574 * Choice of value:
1575 *
1576 * By making GUP_PIN_COUNTING_BIAS a power of two, debugging of page reference
1577 * counts with respect to pin_user_pages() and unpin_user_page() becomes
1578 * simpler, due to the fact that adding an even power of two to the page
1579 * refcount has the effect of using only the upper N bits, for the code that
1580 * counts up using the bias value. This means that the lower bits are left for
1581 * the exclusive use of the original code that increments and decrements by one
1582 * (or at least, by much smaller values than the bias value).
1583 *
1584 * Of course, once the lower bits overflow into the upper bits (and this is
1585 * OK, because subtraction recovers the original values), then visual inspection
1586 * no longer suffices to directly view the separate counts. However, for normal
1587 * applications that don't have huge page reference counts, this won't be an
1588 * issue.
1589 *
1590 * Locking: the lockless algorithm described in folio_try_get_rcu()
1591 * provides safe operation for get_user_pages(), folio_mkclean() and
1592 * other calls that race to set up page table entries.
1593 */
1594#define GUP_PIN_COUNTING_BIAS (1U << 10)
1595
1596void unpin_user_page(struct page *page);
1597void unpin_folio(struct folio *folio);
1598void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
1599				 bool make_dirty);
1600void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
1601				      bool make_dirty);
1602void unpin_user_pages(struct page **pages, unsigned long npages);
1603void unpin_user_folio(struct folio *folio, unsigned long npages);
1604void unpin_folios(struct folio **folios, unsigned long nfolios);
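/*
 * Sketch of the usual long-term pin pattern with the declarations above: each
 * successful pin adds GUP_PIN_COUNTING_BIAS to a small folio's refcount, so
 * the pages must be released with the unpin_*() variants, never put_page().
 * start, nr and pages are placeholders for the caller's range and array:
 *
 *	long pinned = pin_user_pages(start, nr, FOLL_WRITE | FOLL_LONGTERM, pages);
 *	if (pinned > 0) {
 *		// ... DMA into the pinned pages ...
 *		unpin_user_pages_dirty_lock(pages, pinned, true);
 *	}
 */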
1605
1606static inline bool is_cow_mapping(vm_flags_t flags)
1607{
1608	return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
1609}
1610
1611#ifndef CONFIG_MMU
1612static inline bool is_nommu_shared_mapping(vm_flags_t flags)
1613{
1614	/*
1615	 * NOMMU shared mappings are ordinary MAP_SHARED mappings and selected
1616	 * R/O MAP_PRIVATE file mappings that are an effective R/O overlay of
1617	 * a file mapping. R/O MAP_PRIVATE mappings might still modify
1618	 * underlying memory if ptrace is active, so this is only possible if
1619	 * ptrace does not apply. Note that there is no mprotect() to upgrade
1620	 * write permissions later.
1621	 */
1622	return flags & (VM_MAYSHARE | VM_MAYOVERLAY);
1623}
1624#endif
1625
1626#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
1627#define SECTION_IN_PAGE_FLAGS
1628#endif
1629
1630/*
1631 * The identification function is mainly used by the buddy allocator for
1632 * determining if two pages could be buddies. We are not really identifying
1633 * the zone since we could be using the section number id if we do not have
1634 * node id available in page flags.
1635 * We only guarantee that it will return the same value for two combinable
1636 * pages in a zone.
1637 */
1638static inline int page_zone_id(struct page *page)
1639{
1640	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
1641}
1642
1643#ifdef NODE_NOT_IN_PAGE_FLAGS
1644int page_to_nid(const struct page *page);
1645#else
1646static inline int page_to_nid(const struct page *page)
1647{
1648	return (PF_POISONED_CHECK(page)->flags >> NODES_PGSHIFT) & NODES_MASK;
1649}
1650#endif
1651
1652static inline int folio_nid(const struct folio *folio)
1653{
1654	return page_to_nid(&folio->page);
1655}
1656
1657#ifdef CONFIG_NUMA_BALANCING
1658/* page access time bits needs to hold at least 4 seconds */
1659#define PAGE_ACCESS_TIME_MIN_BITS	12
1660#if LAST_CPUPID_SHIFT < PAGE_ACCESS_TIME_MIN_BITS
1661#define PAGE_ACCESS_TIME_BUCKETS				\
1662	(PAGE_ACCESS_TIME_MIN_BITS - LAST_CPUPID_SHIFT)
1663#else
1664#define PAGE_ACCESS_TIME_BUCKETS	0
1665#endif
1666
1667#define PAGE_ACCESS_TIME_MASK				\
1668	(LAST_CPUPID_MASK << PAGE_ACCESS_TIME_BUCKETS)
1669
1670static inline int cpu_pid_to_cpupid(int cpu, int pid)
1671{
1672	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
1673}
1674
1675static inline int cpupid_to_pid(int cpupid)
1676{
1677	return cpupid & LAST__PID_MASK;
1678}
1679
1680static inline int cpupid_to_cpu(int cpupid)
1681{
1682	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
1683}
1684
1685static inline int cpupid_to_nid(int cpupid)
1686{
1687	return cpu_to_node(cpupid_to_cpu(cpupid));
1688}
1689
1690static inline bool cpupid_pid_unset(int cpupid)
1691{
1692	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
1693}
1694
1695static inline bool cpupid_cpu_unset(int cpupid)
1696{
1697	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
1698}
1699
1700static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
1701{
1702	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
1703}
1704
1705#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
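/*
 * Worked example of the packing above, assuming LAST__PID_SHIFT is 8 (so
 * LAST__PID_MASK is 0xff): cpu_pid_to_cpupid(3, 0x1234) stores
 * (3 << 8) | 0x34 == 0x334; cpupid_to_cpu() then recovers 3 and
 * cpupid_to_pid() recovers 0x34 (the pid truncated to its low 8 bits).
 */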
1706#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
1707static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1708{
1709	return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
1710}
1711
1712static inline int folio_last_cpupid(struct folio *folio)
1713{
1714	return folio->_last_cpupid;
1715}
1716static inline void page_cpupid_reset_last(struct page *page)
1717{
1718	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
1719}
1720#else
1721static inline int folio_last_cpupid(struct folio *folio)
1722{
1723	return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
1724}
1725
1726int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
1727
1728static inline void page_cpupid_reset_last(struct page *page)
1729{
1730	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
1731}
1732#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
1733
1734static inline int folio_xchg_access_time(struct folio *folio, int time)
1735{
1736	int last_time;
1737
1738	last_time = folio_xchg_last_cpupid(folio,
1739					   time >> PAGE_ACCESS_TIME_BUCKETS);
1740	return last_time << PAGE_ACCESS_TIME_BUCKETS;
1741}
1742
1743static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1744{
1745	unsigned int pid_bit;
1746
1747	pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
1748	if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
1749		__set_bit(pid_bit, &vma->numab_state->pids_active[1]);
1750	}
1751}
1752
1753bool folio_use_access_time(struct folio *folio);
1754#else /* !CONFIG_NUMA_BALANCING */
1755static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
1756{
1757	return folio_nid(folio); /* XXX */
1758}
1759
1760static inline int folio_xchg_access_time(struct folio *folio, int time)
1761{
1762	return 0;
1763}
1764
1765static inline int folio_last_cpupid(struct folio *folio)
1766{
1767	return folio_nid(folio); /* XXX */
1768}
1769
1770static inline int cpupid_to_nid(int cpupid)
1771{
1772	return -1;
1773}
1774
1775static inline int cpupid_to_pid(int cpupid)
1776{
1777	return -1;
1778}
1779
1780static inline int cpupid_to_cpu(int cpupid)
1781{
1782	return -1;
1783}
1784
1785static inline int cpu_pid_to_cpupid(int nid, int pid)
1786{
1787	return -1;
1788}
1789
1790static inline bool cpupid_pid_unset(int cpupid)
1791{
1792	return true;
1793}
1794
1795static inline void page_cpupid_reset_last(struct page *page)
1796{
1797}
1798
1799static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1800{
1801	return false;
1802}
1803
1804static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
1805{
1806}
1807static inline bool folio_use_access_time(struct folio *folio)
1808{
1809	return false;
1810}
1811#endif /* CONFIG_NUMA_BALANCING */
1812
1813#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
1814
1815/*
1816 * KASAN per-page tags are stored xor'ed with 0xff. This avoids having to
1817 * set tags for all pages to the native kernel tag value 0xff, as the default
1818 * value 0x00 then maps to 0xff.
1819 */
1820
1821static inline u8 page_kasan_tag(const struct page *page)
1822{
1823	u8 tag = KASAN_TAG_KERNEL;
1824
1825	if (kasan_enabled()) {
1826		tag = (page->flags >> KASAN_TAG_PGSHIFT) & KASAN_TAG_MASK;
1827		tag ^= 0xff;
1828	}
1829
1830	return tag;
1831}
1832
1833static inline void page_kasan_tag_set(struct page *page, u8 tag)
1834{
1835	unsigned long old_flags, flags;
1836
1837	if (!kasan_enabled())
1838		return;
1839
1840	tag ^= 0xff;
1841	old_flags = READ_ONCE(page->flags);
1842	do {
1843		flags = old_flags;
1844		flags &= ~(KASAN_TAG_MASK << KASAN_TAG_PGSHIFT);
1845		flags |= (tag & KASAN_TAG_MASK) << KASAN_TAG_PGSHIFT;
1846	} while (unlikely(!try_cmpxchg(&page->flags, &old_flags, flags)));
1847}
1848
1849static inline void page_kasan_tag_reset(struct page *page)
1850{
1851	if (kasan_enabled())
1852		page_kasan_tag_set(page, KASAN_TAG_KERNEL);
1853}
1854
1855#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1856
1857static inline u8 page_kasan_tag(const struct page *page)
1858{
1859	return 0xff;
1860}
1861
1862static inline void page_kasan_tag_set(struct page *page, u8 tag) { }
1863static inline void page_kasan_tag_reset(struct page *page) { }
1864
1865#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
1866
1867static inline struct zone *page_zone(const struct page *page)
1868{
1869	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1870}
1871
1872static inline pg_data_t *page_pgdat(const struct page *page)
1873{
1874	return NODE_DATA(page_to_nid(page));
1875}
1876
1877static inline struct zone *folio_zone(const struct folio *folio)
1878{
1879	return page_zone(&folio->page);
1880}
1881
1882static inline pg_data_t *folio_pgdat(const struct folio *folio)
1883{
1884	return page_pgdat(&folio->page);
1885}
1886
1887#ifdef SECTION_IN_PAGE_FLAGS
1888static inline void set_page_section(struct page *page, unsigned long section)
1889{
1890	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1891	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1892}
1893
1894static inline unsigned long page_to_section(const struct page *page)
1895{
1896	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1897}
1898#endif
1899
1900/**
1901 * folio_pfn - Return the Page Frame Number of a folio.
1902 * @folio: The folio.
1903 *
1904 * A folio may contain multiple pages.  The pages have consecutive
1905 * Page Frame Numbers.
1906 *
1907 * Return: The Page Frame Number of the first page in the folio.
1908 */
1909static inline unsigned long folio_pfn(const struct folio *folio)
1910{
1911	return page_to_pfn(&folio->page);
1912}
1913
1914static inline struct folio *pfn_folio(unsigned long pfn)
1915{
1916	return page_folio(pfn_to_page(pfn));
1917}
1918
1919/**
1920 * folio_maybe_dma_pinned - Report if a folio may be pinned for DMA.
1921 * @folio: The folio.
1922 *
1923 * This function checks if a folio has been pinned via a call to
1924 * a function in the pin_user_pages() family.
1925 *
1926 * For small folios, the return value is partially fuzzy: false is not fuzzy,
1927 * because it means "definitely not pinned for DMA", but true means "probably
1928 * pinned for DMA, but possibly a false positive due to having at least
1929 * GUP_PIN_COUNTING_BIAS worth of normal folio references".
1930 *
1931 * False positives are OK, because: a) it's unlikely for a folio to
1932 * get that many refcounts, and b) all the callers of this routine are
1933 * expected to be able to deal gracefully with a false positive.
1934 *
1935 * For large folios, the result will be exactly correct. That's because
1936 * we have more tracking data available: the _pincount field is used
1937 * instead of the GUP_PIN_COUNTING_BIAS scheme.
1938 *
1939 * For more information, please see Documentation/core-api/pin_user_pages.rst.
1940 *
1941 * Return: True, if it is likely that the folio has been "dma-pinned".
1942 * False, if the folio is definitely not dma-pinned.
1943 */
1944static inline bool folio_maybe_dma_pinned(struct folio *folio)
1945{
1946	if (folio_test_large(folio))
1947		return atomic_read(&folio->_pincount) > 0;
1948
1949	/*
1950	 * folio_ref_count() is signed. If that refcount overflows, then
1951	 * folio_ref_count() returns a negative value, and callers will avoid
1952	 * further incrementing the refcount.
1953	 *
1954	 * Here, for that overflow case, use the sign bit to count a little
1955	 * bit higher via unsigned math, and thus still get an accurate result.
1956	 */
1957	return ((unsigned int)folio_ref_count(folio)) >=
1958		GUP_PIN_COUNTING_BIAS;
1959}
1960
1961/*
1962 * This should most likely only be called during fork() to see whether we
1963 * should break the cow immediately for an anon page on the src mm.
1964 *
1965 * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
1966 */
1967static inline bool folio_needs_cow_for_dma(struct vm_area_struct *vma,
1968					  struct folio *folio)
1969{
1970	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
1971
1972	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
1973		return false;
1974
1975	return folio_maybe_dma_pinned(folio);
1976}
1977
1978/**
1979 * is_zero_page - Query if a page is a zero page
1980 * @page: The page to query
1981 *
1982 * This returns true if @page is one of the permanent zero pages.
1983 */
1984static inline bool is_zero_page(const struct page *page)
1985{
1986	return is_zero_pfn(page_to_pfn(page));
1987}
1988
1989/**
1990 * is_zero_folio - Query if a folio is a zero page
1991 * @folio: The folio to query
1992 *
1993 * This returns true if @folio is one of the permanent zero pages.
1994 */
1995static inline bool is_zero_folio(const struct folio *folio)
1996{
1997	return is_zero_page(&folio->page);
1998}
1999
2000/* MIGRATE_CMA and ZONE_MOVABLE do not allow pin folios */
2001#ifdef CONFIG_MIGRATION
2002static inline bool folio_is_longterm_pinnable(struct folio *folio)
2003{
2004#ifdef CONFIG_CMA
2005	int mt = folio_migratetype(folio);
2006
2007	if (mt == MIGRATE_CMA || mt == MIGRATE_ISOLATE)
2008		return false;
2009#endif
2010	/* The zero page can be "pinned" but gets special handling. */
2011	if (is_zero_folio(folio))
2012		return true;
2013
2014	/* Coherent device memory must always allow eviction. */
2015	if (folio_is_device_coherent(folio))
2016		return false;
2017
2018	/* Otherwise, non-movable zone folios can be pinned. */
2019	return !folio_is_zone_movable(folio);
2020
2021}
2022#else
2023static inline bool folio_is_longterm_pinnable(struct folio *folio)
2024{
2025	return true;
2026}
2027#endif
2028
2029static inline void set_page_zone(struct page *page, enum zone_type zone)
2030{
2031	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
2032	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
2033}
2034
2035static inline void set_page_node(struct page *page, unsigned long node)
2036{
2037	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
2038	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
2039}
2040
2041static inline void set_page_links(struct page *page, enum zone_type zone,
2042	unsigned long node, unsigned long pfn)
2043{
2044	set_page_zone(page, zone);
2045	set_page_node(page, node);
2046#ifdef SECTION_IN_PAGE_FLAGS
2047	set_page_section(page, pfn_to_section_nr(pfn));
2048#endif
2049}
2050
2051/**
2052 * folio_nr_pages - The number of pages in the folio.
2053 * @folio: The folio.
2054 *
2055 * Return: A positive power of two.
2056 */
2057static inline long folio_nr_pages(const struct folio *folio)
2058{
2059	if (!folio_test_large(folio))
2060		return 1;
2061#ifdef CONFIG_64BIT
2062	return folio->_folio_nr_pages;
2063#else
2064	return 1L << (folio->_flags_1 & 0xff);
2065#endif
2066}
2067
2068/* Only hugetlbfs can allocate folios larger than MAX_ORDER */
2069#ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
2070#define MAX_FOLIO_NR_PAGES	(1UL << PUD_ORDER)
2071#else
2072#define MAX_FOLIO_NR_PAGES	MAX_ORDER_NR_PAGES
2073#endif
2074
2075/*
2076 * compound_nr() returns the number of pages in this potentially compound
2077 * page.  compound_nr() can be called on a tail page, and is defined to
2078 * return 1 in that case.
2079 */
2080static inline unsigned long compound_nr(struct page *page)
2081{
2082	struct folio *folio = (struct folio *)page;
2083
2084	if (!test_bit(PG_head, &folio->flags))
2085		return 1;
2086#ifdef CONFIG_64BIT
2087	return folio->_folio_nr_pages;
2088#else
2089	return 1L << (folio->_flags_1 & 0xff);
2090#endif
2091}
2092
2093/**
2094 * thp_nr_pages - The number of regular pages in this huge page.
2095 * @page: The head page of a huge page.
2096 */
2097static inline int thp_nr_pages(struct page *page)
2098{
2099	return folio_nr_pages((struct folio *)page);
2100}
2101
2102/**
2103 * folio_next - Move to the next physical folio.
2104 * @folio: The folio we're currently operating on.
2105 *
2106 * If you have physically contiguous memory which may span more than
2107 * one folio (eg a &struct bio_vec), use this function to move from one
2108 * folio to the next.  Do not use it if the memory is only virtually
2109 * contiguous as the folios are almost certainly not adjacent to each
2110 * other.  This is the folio equivalent to writing ``page++``.
2111 *
2112 * Context: We assume that the folios are refcounted and/or locked at a
2113 * higher level and do not adjust the reference counts.
2114 * Return: The next struct folio.
2115 */
2116static inline struct folio *folio_next(struct folio *folio)
2117{
2118	return (struct folio *)folio_page(folio, folio_nr_pages(folio));
2119}
2120
2121/**
2122 * folio_shift - The size of the memory described by this folio.
2123 * @folio: The folio.
2124 *
2125 * A folio represents a number of bytes which is a power-of-two in size.
2126 * This function tells you which power-of-two the folio is.  See also
2127 * folio_size() and folio_order().
2128 *
2129 * Context: The caller should have a reference on the folio to prevent
2130 * it from being split.  It is not necessary for the folio to be locked.
2131 * Return: The base-2 logarithm of the size of this folio.
2132 */
2133static inline unsigned int folio_shift(const struct folio *folio)
2134{
2135	return PAGE_SHIFT + folio_order(folio);
2136}
2137
2138/**
2139 * folio_size - The number of bytes in a folio.
2140 * @folio: The folio.
2141 *
2142 * Context: The caller should have a reference on the folio to prevent
2143 * it from being split.  It is not necessary for the folio to be locked.
2144 * Return: The number of bytes in this folio.
2145 */
2146static inline size_t folio_size(const struct folio *folio)
2147{
2148	return PAGE_SIZE << folio_order(folio);
2149}
2150
2151/**
2152 * folio_likely_mapped_shared - Estimate if the folio is mapped into the page
2153 *				tables of more than one MM
2154 * @folio: The folio.
2155 *
2156 * This function checks if the folio is currently mapped into more than one
2157 * MM ("mapped shared"), or if the folio is only mapped into a single MM
2158 * ("mapped exclusively").
2159 *
2160 * For KSM folios, this function also returns "mapped shared" when a folio is
2161 * mapped multiple times into the same MM, because the individual page mappings
2162 * are independent.
2163 *
2164 * As precise information is not easily available for all folios, this function
2165 * estimates the number of MMs ("sharers") that are currently mapping a folio
2166 * using the number of times the first page of the folio is currently mapped
2167 * into page tables.
2168 *
2169 * For small anonymous folios and anonymous hugetlb folios, the return
2170 * value will be exactly correct: non-KSM folios can only be mapped at most once
2171 * into an MM, and they cannot be partially mapped. KSM folios are
2172 * considered shared even if mapped multiple times into the same MM.
2173 *
2174 * For other folios, the result can be fuzzy:
2175 *    #. For partially-mappable large folios (THP), the return value can wrongly
2176 *       indicate "mapped exclusively" (false negative) when the folio is
2177 *       only partially mapped into at least one MM.
2178 *    #. For pagecache folios (including hugetlb), the return value can wrongly
2179 *       indicate "mapped shared" (false positive) when two VMAs in the same MM
2180 *       cover the same file range.
2181 *
2182 * Further, this function only considers current page table mappings that
2183 * are tracked using the folio mapcount(s).
2184 *
2185 * This function does not consider:
2186 *    #. If the folio might get mapped in the (near) future (e.g., swapcache,
2187 *       pagecache, temporary unmapping for migration).
2188 *    #. If the folio is mapped differently (VM_PFNMAP).
2189 *    #. If hugetlb page table sharing applies. Callers might want to check
2190 *       hugetlb_pmd_shared().
2191 *
2192 * Return: Whether the folio is estimated to be mapped into more than one MM.
2193 */
2194static inline bool folio_likely_mapped_shared(struct folio *folio)
2195{
2196	int mapcount = folio_mapcount(folio);
2197
2198	/* Only partially-mappable folios require more care. */
2199	if (!folio_test_large(folio) || unlikely(folio_test_hugetlb(folio)))
2200		return mapcount > 1;
2201
2202	/* A single mapping implies "mapped exclusively". */
2203	if (mapcount <= 1)
2204		return false;
2205
2206	/* If any page is mapped more than once we treat it "mapped shared". */
2207	if (folio_entire_mapcount(folio) || mapcount > folio_nr_pages(folio))
2208		return true;
2209
2210	/* Let's guess based on the first subpage. */
2211	return atomic_read(&folio->_mapcount) > 0;
2212}
2213
2214#ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
2215static inline int arch_make_folio_accessible(struct folio *folio)
2216{
2217	return 0;
2218}
2219#endif
2220
2221/*
2222 * Some inline functions in vmstat.h depend on page_zone()
2223 */
2224#include <linux/vmstat.h>
2225
2226#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
2227#define HASHED_PAGE_VIRTUAL
2228#endif
2229
2230#if defined(WANT_PAGE_VIRTUAL)
2231static inline void *page_address(const struct page *page)
2232{
2233	return page->virtual;
2234}
2235static inline void set_page_address(struct page *page, void *address)
2236{
2237	page->virtual = address;
2238}
2239#define page_address_init()  do { } while(0)
2240#endif
2241
2242#if defined(HASHED_PAGE_VIRTUAL)
2243void *page_address(const struct page *page);
2244void set_page_address(struct page *page, void *virtual);
2245void page_address_init(void);
2246#endif
2247
2248static __always_inline void *lowmem_page_address(const struct page *page)
2249{
2250	return page_to_virt(page);
2251}
2252
2253#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
2254#define page_address(page) lowmem_page_address(page)
2255#define set_page_address(page, address)  do { } while(0)
2256#define page_address_init()  do { } while(0)
2257#endif
2258
2259static inline void *folio_address(const struct folio *folio)
2260{
2261	return page_address(&folio->page);
2262}
2263
2264/*
2265 * Return true only if the page has been allocated with
2266 * ALLOC_NO_WATERMARKS and the low watermark was not
2267 * met implying that the system is under some pressure.
2268 */
2269static inline bool page_is_pfmemalloc(const struct page *page)
2270{
2271	/*
2272	 * lru.next has bit 1 set if the page is allocated from the
2273	 * pfmemalloc reserves.  Callers may simply overwrite it if
2274	 * they do not need to preserve that information.
2275	 */
2276	return (uintptr_t)page->lru.next & BIT(1);
2277}
2278
2279/*
2280 * Return true only if the folio has been allocated with
2281 * ALLOC_NO_WATERMARKS and the low watermark was not
2282 * met implying that the system is under some pressure.
2283 */
2284static inline bool folio_is_pfmemalloc(const struct folio *folio)
2285{
2286	/*
2287	 * lru.next has bit 1 set if the page is allocated from the
2288	 * pfmemalloc reserves.  Callers may simply overwrite it if
2289	 * they do not need to preserve that information.
2290	 */
2291	return (uintptr_t)folio->lru.next & BIT(1);
2292}
2293
2294/*
2295 * Only to be called by the page allocator on a freshly allocated
2296 * page.
2297 */
2298static inline void set_page_pfmemalloc(struct page *page)
2299{
2300	page->lru.next = (void *)BIT(1);
2301}
2302
2303static inline void clear_page_pfmemalloc(struct page *page)
2304{
2305	page->lru.next = NULL;
2306}
2307
2308/*
2309 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
2310 */
2311extern void pagefault_out_of_memory(void);
2312
2313#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
2314#define offset_in_thp(page, p)	((unsigned long)(p) & (thp_size(page) - 1))
2315#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
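/*
 * Worked example for the macros above, assuming 4 KiB pages:
 * offset_in_page(0x12345678) masks with PAGE_SIZE - 1 and yields 0x678;
 * offset_in_folio() does the same against the (possibly larger) folio size.
 */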
2316
2317/*
2318 * Parameter block passed down to zap_pte_range in exceptional cases.
2319 */
2320struct zap_details {
2321	struct folio *single_folio;	/* Locked folio to be unmapped */
2322	bool even_cows;			/* Zap COWed private pages too? */
2323	zap_flags_t zap_flags;		/* Extra flags for zapping */
2324};
2325
2326/*
2327 * Whether to drop the pte markers, for example, the uffd-wp information for
2328 * file-backed memory.  This should only be specified when we will completely
2329 * drop the page in the mm, either by truncation or unmapping of the vma.  By
2330 * default, the flag is not set.
2331 */
2332#define  ZAP_FLAG_DROP_MARKER        ((__force zap_flags_t) BIT(0))
2333/* Set in unmap_vmas() to indicate a final unmap call.  Only used by hugetlb */
2334#define  ZAP_FLAG_UNMAP              ((__force zap_flags_t) BIT(1))
2335
2336#ifdef CONFIG_SCHED_MM_CID
2337void sched_mm_cid_before_execve(struct task_struct *t);
2338void sched_mm_cid_after_execve(struct task_struct *t);
2339void sched_mm_cid_fork(struct task_struct *t);
2340void sched_mm_cid_exit_signals(struct task_struct *t);
2341static inline int task_mm_cid(struct task_struct *t)
2342{
2343	return t->mm_cid;
2344}
2345#else
2346static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
2347static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
2348static inline void sched_mm_cid_fork(struct task_struct *t) { }
2349static inline void sched_mm_cid_exit_signals(struct task_struct *t) { }
2350static inline int task_mm_cid(struct task_struct *t)
2351{
2352	/*
2353	 * Use the processor id as a fall-back when the mm cid feature is
2354	 * disabled. This provides functional per-cpu data structure accesses
2355	 * in user-space, although it won't provide the memory usage benefits.
2356	 */
2357	return raw_smp_processor_id();
2358}
2359#endif
2360
2361#ifdef CONFIG_MMU
2362extern bool can_do_mlock(void);
2363#else
2364static inline bool can_do_mlock(void) { return false; }
2365#endif
2366extern int user_shm_lock(size_t, struct ucounts *);
2367extern void user_shm_unlock(size_t, struct ucounts *);
2368
2369struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
2370			     pte_t pte);
2371struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
2372			     pte_t pte);
2373struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
2374				  unsigned long addr, pmd_t pmd);
2375struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
2376				pmd_t pmd);
2377
2378void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
2379		  unsigned long size);
2380void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
2381			   unsigned long size, struct zap_details *details);
2382static inline void zap_vma_pages(struct vm_area_struct *vma)
2383{
2384	zap_page_range_single(vma, vma->vm_start,
2385			      vma->vm_end - vma->vm_start, NULL);
2386}
2387void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
2388		struct vm_area_struct *start_vma, unsigned long start,
2389		unsigned long end, unsigned long tree_end, bool mm_wr_locked);
2390
2391struct mmu_notifier_range;
2392
2393void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
2394		unsigned long end, unsigned long floor, unsigned long ceiling);
2395int
2396copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
2397int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
2398			void *buf, int len, int write);
2399
2400struct follow_pfnmap_args {
2401	/**
2402	 * Inputs:
2403	 * @vma: Pointer to @vm_area_struct struct
2404	 * @address: the virtual address to walk
2405	 */
2406	struct vm_area_struct *vma;
2407	unsigned long address;
2408	/**
2409	 * Internals:
2410	 *
2411	 * The caller shouldn't touch any of these.
2412	 */
2413	spinlock_t *lock;
2414	pte_t *ptep;
2415	/**
2416	 * Outputs:
2417	 *
2418	 * @pfn: the PFN of the address
2419	 * @pgprot: the pgprot_t of the mapping
2420	 * @writable: whether the mapping is writable
2421	 * @special: whether the mapping is a special mapping (real PFN maps)
2422	 */
2423	unsigned long pfn;
2424	pgprot_t pgprot;
2425	bool writable;
2426	bool special;
2427};
2428int follow_pfnmap_start(struct follow_pfnmap_args *args);
2429void follow_pfnmap_end(struct follow_pfnmap_args *args);
2430
2431extern void truncate_pagecache(struct inode *inode, loff_t new);
2432extern void truncate_setsize(struct inode *inode, loff_t newsize);
2433void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
2434void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
2435int generic_error_remove_folio(struct address_space *mapping,
2436		struct folio *folio);
2437
2438struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
2439		unsigned long address, struct pt_regs *regs);
2440
2441#ifdef CONFIG_MMU
2442extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2443				  unsigned long address, unsigned int flags,
2444				  struct pt_regs *regs);
2445extern int fixup_user_fault(struct mm_struct *mm,
2446			    unsigned long address, unsigned int fault_flags,
2447			    bool *unlocked);
2448void unmap_mapping_pages(struct address_space *mapping,
2449		pgoff_t start, pgoff_t nr, bool even_cows);
2450void unmap_mapping_range(struct address_space *mapping,
2451		loff_t const holebegin, loff_t const holelen, int even_cows);
2452#else
2453static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
2454					 unsigned long address, unsigned int flags,
2455					 struct pt_regs *regs)
2456{
2457	/* should never happen if there's no MMU */
2458	BUG();
2459	return VM_FAULT_SIGBUS;
2460}
2461static inline int fixup_user_fault(struct mm_struct *mm, unsigned long address,
2462		unsigned int fault_flags, bool *unlocked)
2463{
2464	/* should never happen if there's no MMU */
2465	BUG();
2466	return -EFAULT;
2467}
2468static inline void unmap_mapping_pages(struct address_space *mapping,
2469		pgoff_t start, pgoff_t nr, bool even_cows) { }
2470static inline void unmap_mapping_range(struct address_space *mapping,
2471		loff_t const holebegin, loff_t const holelen, int even_cows) { }
2472#endif
2473
2474static inline void unmap_shared_mapping_range(struct address_space *mapping,
2475		loff_t const holebegin, loff_t const holelen)
2476{
2477	unmap_mapping_range(mapping, holebegin, holelen, 0);
2478}
2479
2480static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm,
2481						unsigned long addr);
2482
2483extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
2484		void *buf, int len, unsigned int gup_flags);
2485extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
2486		void *buf, int len, unsigned int gup_flags);
2487
2488long get_user_pages_remote(struct mm_struct *mm,
2489			   unsigned long start, unsigned long nr_pages,
2490			   unsigned int gup_flags, struct page **pages,
2491			   int *locked);
2492long pin_user_pages_remote(struct mm_struct *mm,
2493			   unsigned long start, unsigned long nr_pages,
2494			   unsigned int gup_flags, struct page **pages,
2495			   int *locked);
2496
2497/*
2498 * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
2499 */
2500static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
2501						    unsigned long addr,
2502						    int gup_flags,
2503						    struct vm_area_struct **vmap)
2504{
2505	struct page *page;
2506	struct vm_area_struct *vma;
2507	int got;
2508
2509	if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
2510		return ERR_PTR(-EINVAL);
2511
2512	got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
2513
2514	if (got < 0)
2515		return ERR_PTR(got);
2516
2517	vma = vma_lookup(mm, addr);
2518	if (WARN_ON_ONCE(!vma)) {
2519		put_page(page);
2520		return ERR_PTR(-EINVAL);
2521	}
2522
2523	*vmap = vma;
2524	return page;
2525}
2526
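/*
 * Illustrative call of the helper above (a sketch; mm and addr are
 * placeholders).  The caller must hold mm's mmap lock and is responsible for
 * dropping the page reference:
 *
 *	struct vm_area_struct *vma;
 *	struct page *page = get_user_page_vma_remote(mm, addr, FOLL_WRITE, &vma);
 *
 *	if (!IS_ERR(page)) {
 *		// ... use page and vma ...
 *		put_page(page);
 *	}
 */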
2527long get_user_pages(unsigned long start, unsigned long nr_pages,
2528		    unsigned int gup_flags, struct page **pages);
2529long pin_user_pages(unsigned long start, unsigned long nr_pages,
2530		    unsigned int gup_flags, struct page **pages);
2531long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2532		    struct page **pages, unsigned int gup_flags);
2533long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
2534		    struct page **pages, unsigned int gup_flags);
2535long memfd_pin_folios(struct file *memfd, loff_t start, loff_t end,
2536		      struct folio **folios, unsigned int max_folios,
2537		      pgoff_t *offset);
2538int folio_add_pins(struct folio *folio, unsigned int pins);
2539
2540int get_user_pages_fast(unsigned long start, int nr_pages,
2541			unsigned int gup_flags, struct page **pages);
2542int pin_user_pages_fast(unsigned long start, int nr_pages,
2543			unsigned int gup_flags, struct page **pages);
2544void folio_add_pin(struct folio *folio);
2545
2546int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
2547int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
2548			struct task_struct *task, bool bypass_rlim);
2549
2550struct kvec;
2551struct page *get_dump_page(unsigned long addr);
2552
2553bool folio_mark_dirty(struct folio *folio);
2554bool folio_mark_dirty_lock(struct folio *folio);
2555bool set_page_dirty(struct page *page);
2556int set_page_dirty_lock(struct page *page);
2557
2558int get_cmdline(struct task_struct *task, char *buffer, int buflen);
2559
2560/*
2561 * Flags used by change_protection().  For now we make it a bitmap so
2562 * that we can pass in multiple flags just like parameters.  However
2563 * for now all the callers only use one of the flags at the same
2564 * time.
2565 */
2566/*
2567 * Whether we should manually check if we can map individual PTEs writable,
2568 * because something (e.g., COW, uffd-wp) blocks that from happening for all
2569 * PTEs automatically in a writable mapping.
2570 */
2571#define  MM_CP_TRY_CHANGE_WRITABLE	   (1UL << 0)
2572/* Whether this protection change is for NUMA hints */
2573#define  MM_CP_PROT_NUMA                   (1UL << 1)
2574/* Whether this change is for write protecting */
2575#define  MM_CP_UFFD_WP                     (1UL << 2) /* do wp */
2576#define  MM_CP_UFFD_WP_RESOLVE             (1UL << 3) /* Resolve wp */
2577#define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
2578					    MM_CP_UFFD_WP_RESOLVE)
2579
2580bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr,
2581			     pte_t pte);
2582extern long change_protection(struct mmu_gather *tlb,
2583			      struct vm_area_struct *vma, unsigned long start,
2584			      unsigned long end, unsigned long cp_flags);
2585extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
2586	  struct vm_area_struct *vma, struct vm_area_struct **pprev,
2587	  unsigned long start, unsigned long end, unsigned long newflags);
2588
2589/*
2590 * Doesn't attempt to fault in pages; may return short (fewer pages than requested).
2591 */
2592int get_user_pages_fast_only(unsigned long start, int nr_pages,
2593			     unsigned int gup_flags, struct page **pages);
2594
2595static inline bool get_user_page_fast_only(unsigned long addr,
2596			unsigned int gup_flags, struct page **pagep)
2597{
2598	return get_user_pages_fast_only(addr, 1, gup_flags, pagep) == 1;
2599}
2600/*
2601 * per-process (per-mm_struct) statistics.
2602 */
2603static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
2604{
2605	return percpu_counter_read_positive(&mm->rss_stat[member]);
2606}
2607
2608void mm_trace_rss_stat(struct mm_struct *mm, int member);
2609
2610static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
2611{
2612	percpu_counter_add(&mm->rss_stat[member], value);
2613
2614	mm_trace_rss_stat(mm, member);
2615}
2616
2617static inline void inc_mm_counter(struct mm_struct *mm, int member)
2618{
2619	percpu_counter_inc(&mm->rss_stat[member]);
2620
2621	mm_trace_rss_stat(mm, member);
2622}
2623
2624static inline void dec_mm_counter(struct mm_struct *mm, int member)
2625{
2626	percpu_counter_dec(&mm->rss_stat[member]);
2627
2628	mm_trace_rss_stat(mm, member);
2629}
2630
2631/* Optimized variant when folio is already known not to be anon */
2632static inline int mm_counter_file(struct folio *folio)
2633{
2634	if (folio_test_swapbacked(folio))
2635		return MM_SHMEMPAGES;
2636	return MM_FILEPAGES;
2637}
2638
2639static inline int mm_counter(struct folio *folio)
2640{
2641	if (folio_test_anon(folio))
2642		return MM_ANONPAGES;
2643	return mm_counter_file(folio);
2644}
2645
2646static inline unsigned long get_mm_rss(struct mm_struct *mm)
2647{
2648	return get_mm_counter(mm, MM_FILEPAGES) +
2649		get_mm_counter(mm, MM_ANONPAGES) +
2650		get_mm_counter(mm, MM_SHMEMPAGES);
2651}
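
/*
 * Illustrative sketch of reading the per-mm RSS breakdown with the counter
 * helpers above (values are approximate because the counters are per-CPU):
 *
 *	unsigned long file  = get_mm_counter(mm, MM_FILEPAGES);
 *	unsigned long anon  = get_mm_counter(mm, MM_ANONPAGES);
 *	unsigned long shmem = get_mm_counter(mm, MM_SHMEMPAGES);
 *	unsigned long rss   = file + anon + shmem;	/* == get_mm_rss(mm) */
 */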
2652
2653static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
2654{
2655	return max(mm->hiwater_rss, get_mm_rss(mm));
2656}
2657
2658static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
2659{
2660	return max(mm->hiwater_vm, mm->total_vm);
2661}
2662
2663static inline void update_hiwater_rss(struct mm_struct *mm)
2664{
2665	unsigned long _rss = get_mm_rss(mm);
2666
2667	if ((mm)->hiwater_rss < _rss)
2668		(mm)->hiwater_rss = _rss;
2669}
2670
2671static inline void update_hiwater_vm(struct mm_struct *mm)
2672{
2673	if (mm->hiwater_vm < mm->total_vm)
2674		mm->hiwater_vm = mm->total_vm;
2675}
2676
2677static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
2678{
2679	mm->hiwater_rss = get_mm_rss(mm);
2680}
2681
2682static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
2683					 struct mm_struct *mm)
2684{
2685	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
2686
2687	if (*maxrss < hiwater_rss)
2688		*maxrss = hiwater_rss;
2689}
2690
2691#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
2692static inline int pte_special(pte_t pte)
2693{
2694	return 0;
2695}
2696
2697static inline pte_t pte_mkspecial(pte_t pte)
2698{
2699	return pte;
2700}
2701#endif
2702
2703#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
2704static inline bool pmd_special(pmd_t pmd)
2705{
2706	return false;
2707}
2708
2709static inline pmd_t pmd_mkspecial(pmd_t pmd)
2710{
2711	return pmd;
2712}
2713#endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
2714
2715#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
2716static inline bool pud_special(pud_t pud)
2717{
2718	return false;
2719}
2720
2721static inline pud_t pud_mkspecial(pud_t pud)
2722{
2723	return pud;
2724}
2725#endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
2726
2727#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
2728static inline int pte_devmap(pte_t pte)
2729{
2730	return 0;
2731}
2732#endif
2733
2734extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
2735			       spinlock_t **ptl);
2736static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
2737				    spinlock_t **ptl)
2738{
2739	pte_t *ptep;
2740	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
2741	return ptep;
2742}
2743
2744#ifdef __PAGETABLE_P4D_FOLDED
2745static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2746						unsigned long address)
2747{
2748	return 0;
2749}
2750#else
2751int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
2752#endif
2753
2754#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
2755static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2756						unsigned long address)
2757{
2758	return 0;
2759}
2760static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
2761static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
2762
2763#else
2764int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
2765
2766static inline void mm_inc_nr_puds(struct mm_struct *mm)
2767{
2768	if (mm_pud_folded(mm))
2769		return;
2770	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2771}
2772
2773static inline void mm_dec_nr_puds(struct mm_struct *mm)
2774{
2775	if (mm_pud_folded(mm))
2776		return;
2777	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
2778}
2779#endif
2780
2781#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
2782static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
2783						unsigned long address)
2784{
2785	return 0;
2786}
2787
2788static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
2789static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
2790
2791#else
2792int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
2793
2794static inline void mm_inc_nr_pmds(struct mm_struct *mm)
2795{
2796	if (mm_pmd_folded(mm))
2797		return;
2798	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2799}
2800
2801static inline void mm_dec_nr_pmds(struct mm_struct *mm)
2802{
2803	if (mm_pmd_folded(mm))
2804		return;
2805	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
2806}
2807#endif
2808
2809#ifdef CONFIG_MMU
2810static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
2811{
2812	atomic_long_set(&mm->pgtables_bytes, 0);
2813}
2814
2815static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2816{
2817	return atomic_long_read(&mm->pgtables_bytes);
2818}
2819
2820static inline void mm_inc_nr_ptes(struct mm_struct *mm)
2821{
2822	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2823}
2824
2825static inline void mm_dec_nr_ptes(struct mm_struct *mm)
2826{
2827	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
2828}
2829#else
2830
2831static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
2832static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
2833{
2834	return 0;
2835}
2836
2837static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
2838static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
2839#endif
2840
2841int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
2842int __pte_alloc_kernel(pmd_t *pmd);
2843
2844#if defined(CONFIG_MMU)
2845
2846static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
2847		unsigned long address)
2848{
2849	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
2850		NULL : p4d_offset(pgd, address);
2851}
2852
2853static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
2854		unsigned long address)
2855{
2856	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
2857		NULL : pud_offset(p4d, address);
2858}
2859
2860static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2861{
2862	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
2863		NULL: pmd_offset(pud, address);
2864}
2865#endif /* CONFIG_MMU */
2866
2867static inline struct ptdesc *virt_to_ptdesc(const void *x)
2868{
2869	return page_ptdesc(virt_to_page(x));
2870}
2871
2872static inline void *ptdesc_to_virt(const struct ptdesc *pt)
2873{
2874	return page_to_virt(ptdesc_page(pt));
2875}
2876
2877static inline void *ptdesc_address(const struct ptdesc *pt)
2878{
2879	return folio_address(ptdesc_folio(pt));
2880}
2881
2882static inline bool pagetable_is_reserved(struct ptdesc *pt)
2883{
2884	return folio_test_reserved(ptdesc_folio(pt));
2885}
2886
2887/**
2888 * pagetable_alloc - Allocate pagetables
2889 * @gfp:    GFP flags
2890 * @order:  desired pagetable order
2891 *
2892 * pagetable_alloc allocates memory for page tables as well as a page table
2893 * descriptor to describe that memory.
2894 *
2895 * Return: The ptdesc describing the allocated page tables.
2896 */
2897static inline struct ptdesc *pagetable_alloc_noprof(gfp_t gfp, unsigned int order)
2898{
2899	struct page *page = alloc_pages_noprof(gfp | __GFP_COMP, order);
2900
2901	return page_ptdesc(page);
2902}
2903#define pagetable_alloc(...)	alloc_hooks(pagetable_alloc_noprof(__VA_ARGS__))
2904
2905/**
2906 * pagetable_free - Free pagetables
2907 * @pt:	The page table descriptor
2908 *
2909 * pagetable_free frees the memory of all page tables described by a page
2910 * table descriptor and the memory for the descriptor itself.
2911 */
2912static inline void pagetable_free(struct ptdesc *pt)
2913{
2914	struct page *page = ptdesc_page(pt);
2915
2916	__free_pages(page, compound_order(page));
2917}
2918
2919#if defined(CONFIG_SPLIT_PTE_PTLOCKS)
2920#if ALLOC_SPLIT_PTLOCKS
2921void __init ptlock_cache_init(void);
2922bool ptlock_alloc(struct ptdesc *ptdesc);
2923void ptlock_free(struct ptdesc *ptdesc);
2924
2925static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2926{
2927	return ptdesc->ptl;
2928}
2929#else /* ALLOC_SPLIT_PTLOCKS */
2930static inline void ptlock_cache_init(void)
2931{
2932}
2933
2934static inline bool ptlock_alloc(struct ptdesc *ptdesc)
2935{
2936	return true;
2937}
2938
2939static inline void ptlock_free(struct ptdesc *ptdesc)
2940{
2941}
2942
2943static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
2944{
2945	return &ptdesc->ptl;
2946}
2947#endif /* ALLOC_SPLIT_PTLOCKS */
2948
2949static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2950{
2951	return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
2952}
2953
2954static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
2955{
2956	BUILD_BUG_ON(IS_ENABLED(CONFIG_HIGHPTE));
2957	BUILD_BUG_ON(MAX_PTRS_PER_PTE * sizeof(pte_t) > PAGE_SIZE);
2958	return ptlock_ptr(virt_to_ptdesc(pte));
2959}
2960
2961static inline bool ptlock_init(struct ptdesc *ptdesc)
2962{
2963	/*
2964	 * prep_new_page() initializes page->private (and therefore page->ptl)
2965	 * with 0. Make sure nobody took it into use in between.
2966	 *
2967	 * It can happen if an arch tries to use slab for page table allocation:
2968	 * slab code uses page->slab_cache, which shares storage with page->ptl.
2969	 */
2970	VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
2971	if (!ptlock_alloc(ptdesc))
2972		return false;
2973	spin_lock_init(ptlock_ptr(ptdesc));
2974	return true;
2975}
2976
2977#else	/* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */
2978/*
2979 * We use mm->page_table_lock to guard all pagetable pages of the mm.
2980 */
2981static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
2982{
2983	return &mm->page_table_lock;
2984}
2985static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte)
2986{
2987	return &mm->page_table_lock;
2988}
2989static inline void ptlock_cache_init(void) {}
2990static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
2991static inline void ptlock_free(struct ptdesc *ptdesc) {}
2992#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */
2993
2994static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
2995{
2996	struct folio *folio = ptdesc_folio(ptdesc);
2997
2998	if (!ptlock_init(ptdesc))
2999		return false;
3000	__folio_set_pgtable(folio);
3001	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3002	return true;
3003}
3004
3005static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
3006{
3007	struct folio *folio = ptdesc_folio(ptdesc);
3008
3009	ptlock_free(ptdesc);
3010	__folio_clear_pgtable(folio);
3011	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3012}
3013
3014pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
3015static inline pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr,
3016			pmd_t *pmdvalp)
3017{
3018	pte_t *pte;
3019
3020	__cond_lock(RCU, pte = ___pte_offset_map(pmd, addr, pmdvalp));
3021	return pte;
3022}
3023static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr)
3024{
3025	return __pte_offset_map(pmd, addr, NULL);
3026}
3027
3028pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3029			unsigned long addr, spinlock_t **ptlp);
3030static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
3031			unsigned long addr, spinlock_t **ptlp)
3032{
3033	pte_t *pte;
3034
3035	__cond_lock(RCU, __cond_lock(*ptlp,
3036			pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)));
3037	return pte;
3038}
3039
3040pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
3041				unsigned long addr, spinlock_t **ptlp);
3042pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
3043				unsigned long addr, pmd_t *pmdvalp,
3044				spinlock_t **ptlp);
3045
3046#define pte_unmap_unlock(pte, ptl)	do {		\
3047	spin_unlock(ptl);				\
3048	pte_unmap(pte);					\
3049} while (0)
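
/*
 * Minimal sketch of the intended pairing (illustrative): map and lock one
 * PTE, inspect it, then drop both the kmap and the split lock in one go.
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte) {
 *		pte_t entry = ptep_get(pte);
 *
 *		... act on entry ...
 *		pte_unmap_unlock(pte, ptl);
 *	}
 */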
3050
3051#define pte_alloc(mm, pmd) (unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd))
3052
3053#define pte_alloc_map(mm, pmd, address)			\
3054	(pte_alloc(mm, pmd) ? NULL : pte_offset_map(pmd, address))
3055
3056#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
3057	(pte_alloc(mm, pmd) ?			\
3058		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
3059
3060#define pte_alloc_kernel(pmd, address)			\
3061	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \
3062		NULL: pte_offset_kernel(pmd, address))
3063
3064#if defined(CONFIG_SPLIT_PMD_PTLOCKS)
3065
3066static inline struct page *pmd_pgtable_page(pmd_t *pmd)
3067{
3068	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
3069	return virt_to_page((void *)((unsigned long) pmd & mask));
3070}
3071
3072static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
3073{
3074	return page_ptdesc(pmd_pgtable_page(pmd));
3075}
3076
3077static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3078{
3079	return ptlock_ptr(pmd_ptdesc(pmd));
3080}
3081
3082static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
3083{
3084#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3085	ptdesc->pmd_huge_pte = NULL;
3086#endif
3087	return ptlock_init(ptdesc);
3088}
3089
3090static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
3091{
3092#ifdef CONFIG_TRANSPARENT_HUGEPAGE
3093	VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
3094#endif
3095	ptlock_free(ptdesc);
3096}
3097
3098#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
3099
3100#else
3101
3102static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
3103{
3104	return &mm->page_table_lock;
3105}
3106
3107static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
3108static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
3109
3110#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
3111
3112#endif
3113
3114static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
3115{
3116	spinlock_t *ptl = pmd_lockptr(mm, pmd);
3117	spin_lock(ptl);
3118	return ptl;
3119}
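
/*
 * Illustrative pairing for the PMD lock helper above; there is no
 * pmd_unlock(), so the caller releases the returned spinlock directly:
 *
 *	spinlock_t *ptl = pmd_lock(mm, pmd);
 *
 *	... operate on *pmd ...
 *	spin_unlock(ptl);
 */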
3120
3121static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
3122{
3123	struct folio *folio = ptdesc_folio(ptdesc);
3124
3125	if (!pmd_ptlock_init(ptdesc))
3126		return false;
3127	__folio_set_pgtable(folio);
3128	ptdesc_pmd_pts_init(ptdesc);
3129	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3130	return true;
3131}
3132
3133static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
3134{
3135	struct folio *folio = ptdesc_folio(ptdesc);
3136
3137	pmd_ptlock_free(ptdesc);
3138	__folio_clear_pgtable(folio);
3139	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3140}
3141
3142/*
3143 * No scalability reason to split PUD locks yet, but follow the same pattern
3144 * as the PMD locks to make it easier if we decide to.  The VM should not be
3145 * considered ready to switch to split PUD locks yet; there may be places
3146 * which need to be converted from page_table_lock.
3147 */
3148static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
3149{
3150	return &mm->page_table_lock;
3151}
3152
3153static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
3154{
3155	spinlock_t *ptl = pud_lockptr(mm, pud);
3156
3157	spin_lock(ptl);
3158	return ptl;
3159}
3160
3161static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
3162{
3163	struct folio *folio = ptdesc_folio(ptdesc);
3164
3165	__folio_set_pgtable(folio);
3166	lruvec_stat_add_folio(folio, NR_PAGETABLE);
3167}
3168
3169static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
3170{
3171	struct folio *folio = ptdesc_folio(ptdesc);
3172
3173	__folio_clear_pgtable(folio);
3174	lruvec_stat_sub_folio(folio, NR_PAGETABLE);
3175}
3176
3177extern void __init pagecache_init(void);
3178extern void free_initmem(void);
3179
3180/*
3181 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
3182 * into the buddy system. The freed pages will be poisoned with pattern
3183 * "poison" if it's within range [0, UCHAR_MAX].
3184 * Return pages freed into the buddy system.
3185 */
3186extern unsigned long free_reserved_area(void *start, void *end,
3187					int poison, const char *s);
3188
3189extern void adjust_managed_page_count(struct page *page, long count);
3190
3191extern void reserve_bootmem_region(phys_addr_t start,
3192				   phys_addr_t end, int nid);
3193
3194/* Free the reserved page into the buddy system, so it gets managed. */
3195void free_reserved_page(struct page *page);
3196#define free_highmem_page(page) free_reserved_page(page)
3197
3198static inline void mark_page_reserved(struct page *page)
3199{
3200	SetPageReserved(page);
3201	adjust_managed_page_count(page, -1);
3202}
3203
3204static inline void free_reserved_ptdesc(struct ptdesc *pt)
3205{
3206	free_reserved_page(ptdesc_page(pt));
3207}
3208
3209/*
3210 * Default method to free all the __init memory into the buddy system.
3211 * The freed pages will be poisoned with the pattern "poison" if it is
3212 * within the range [0, UCHAR_MAX].
3213 * Returns the number of pages freed into the buddy system.
3214 */
3215static inline unsigned long free_initmem_default(int poison)
3216{
3217	extern char __init_begin[], __init_end[];
3218
3219	return free_reserved_area(&__init_begin, &__init_end,
3220				  poison, "unused kernel image (initmem)");
3221}
3222
3223static inline unsigned long get_num_physpages(void)
3224{
3225	int nid;
3226	unsigned long phys_pages = 0;
3227
3228	for_each_online_node(nid)
3229		phys_pages += node_present_pages(nid);
3230
3231	return phys_pages;
3232}
3233
3234/*
3235 * Using memblock node mappings, an architecture may initialise its
3236 * zones, allocate the backing mem_map and account for memory holes in an
3237 * architecture independent manner.
3238 *
3239 * An architecture is expected to register the ranges of page frames backed
3240 * by physical memory with memblock_add[_node]() before calling
3241 * free_area_init(), passing in the PFN each zone ends at. For basic
3242 * usage, an architecture is expected to do something like:
3243 *
3244 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
3245 * 							 max_highmem_pfn};
3246 * for_each_valid_physical_page_range()
3247 *	memblock_add_node(base, size, nid, MEMBLOCK_NONE)
3248 * free_area_init(max_zone_pfns);
3249 */
3250void free_area_init(unsigned long *max_zone_pfn);
3251unsigned long node_map_pfn_alignment(void);
3252extern unsigned long absent_pages_in_range(unsigned long start_pfn,
3253						unsigned long end_pfn);
3254extern void get_pfn_range_for_nid(unsigned int nid,
3255			unsigned long *start_pfn, unsigned long *end_pfn);
3256
3257#ifndef CONFIG_NUMA
3258static inline int early_pfn_to_nid(unsigned long pfn)
3259{
3260	return 0;
3261}
3262#else
3263/* please see mm/page_alloc.c */
3264extern int __meminit early_pfn_to_nid(unsigned long pfn);
3265#endif
3266
3267extern void mem_init(void);
3268extern void __init mmap_init(void);
3269
3270extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
3271static inline void show_mem(void)
3272{
3273	__show_mem(0, NULL, MAX_NR_ZONES - 1);
3274}
3275extern long si_mem_available(void);
3276extern void si_meminfo(struct sysinfo * val);
3277extern void si_meminfo_node(struct sysinfo *val, int nid);
3278
3279extern __printf(3, 4)
3280void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
3281
3282extern void setup_per_cpu_pageset(void);
3283
3284/* nommu.c */
3285extern atomic_long_t mmap_pages_allocated;
3286extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
3287
3288/* interval_tree.c */
3289void vma_interval_tree_insert(struct vm_area_struct *node,
3290			      struct rb_root_cached *root);
3291void vma_interval_tree_insert_after(struct vm_area_struct *node,
3292				    struct vm_area_struct *prev,
3293				    struct rb_root_cached *root);
3294void vma_interval_tree_remove(struct vm_area_struct *node,
3295			      struct rb_root_cached *root);
3296struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
3297				unsigned long start, unsigned long last);
3298struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
3299				unsigned long start, unsigned long last);
3300
3301#define vma_interval_tree_foreach(vma, root, start, last)		\
3302	for (vma = vma_interval_tree_iter_first(root, start, last);	\
3303	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
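
/*
 * Illustrative walk (rmap-style) over every VMA that maps a page-cache
 * range; assumes the caller already holds the mapping's i_mmap lock:
 *
 *	struct vm_area_struct *vma;
 *
 *	vma_interval_tree_foreach(vma, &mapping->i_mmap, first_pgoff, last_pgoff) {
 *		... vma covers some of [first_pgoff, last_pgoff] ...
 *	}
 */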
3304
3305void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
3306				   struct rb_root_cached *root);
3307void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
3308				   struct rb_root_cached *root);
3309struct anon_vma_chain *
3310anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
3311				  unsigned long start, unsigned long last);
3312struct anon_vma_chain *anon_vma_interval_tree_iter_next(
3313	struct anon_vma_chain *node, unsigned long start, unsigned long last);
3314#ifdef CONFIG_DEBUG_VM_RB
3315void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
3316#endif
3317
3318#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
3319	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
3320	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
3321
3322/* mmap.c */
3323extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
3324extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
3325extern void exit_mmap(struct mm_struct *);
3326int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift);
3327
3328static inline int check_data_rlimit(unsigned long rlim,
3329				    unsigned long new,
3330				    unsigned long start,
3331				    unsigned long end_data,
3332				    unsigned long start_data)
3333{
3334	if (rlim < RLIM_INFINITY) {
3335		if (((new - start) + (end_data - start_data)) > rlim)
3336			return -ENOSPC;
3337	}
3338
3339	return 0;
3340}
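
/*
 * Sketch of how a brk()-style path might consult the check above (the
 * surrounding variables and labels are placeholders):
 *
 *	if (check_data_rlimit(rlimit(RLIMIT_DATA), newbrk, mm->start_brk,
 *			      mm->end_data, mm->start_data))
 *		goto out;	/* would exceed RLIMIT_DATA */
 */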
3341
3342extern int mm_take_all_locks(struct mm_struct *mm);
3343extern void mm_drop_all_locks(struct mm_struct *mm);
3344
3345extern int set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3346extern int replace_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
3347extern struct file *get_mm_exe_file(struct mm_struct *mm);
3348extern struct file *get_task_exe_file(struct task_struct *task);
3349
3350extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
3351extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
3352
3353extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
3354				   const struct vm_special_mapping *sm);
3355extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
3356				   unsigned long addr, unsigned long len,
3357				   unsigned long flags,
3358				   const struct vm_special_mapping *spec);
3359
3360unsigned long randomize_stack_top(unsigned long stack_top);
3361unsigned long randomize_page(unsigned long start, unsigned long range);
3362
3363unsigned long
3364__get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3365		    unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags);
3366
3367static inline unsigned long
3368get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
3369		  unsigned long pgoff, unsigned long flags)
3370{
3371	return __get_unmapped_area(file, addr, len, pgoff, flags, 0);
3372}
3373
3374extern unsigned long mmap_region(struct file *file, unsigned long addr,
3375	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
3376	struct list_head *uf);
3377extern unsigned long do_mmap(struct file *file, unsigned long addr,
3378	unsigned long len, unsigned long prot, unsigned long flags,
3379	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
3380	struct list_head *uf);
3381extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
3382			 unsigned long start, size_t len, struct list_head *uf,
3383			 bool unlock);
3384int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
3385		    struct mm_struct *mm, unsigned long start,
3386		    unsigned long end, struct list_head *uf, bool unlock);
3387extern int do_munmap(struct mm_struct *, unsigned long, size_t,
3388		     struct list_head *uf);
3389extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior);
3390
3391#ifdef CONFIG_MMU
3392extern int __mm_populate(unsigned long addr, unsigned long len,
3393			 int ignore_errors);
3394static inline void mm_populate(unsigned long addr, unsigned long len)
3395{
3396	/* Ignore errors */
3397	(void) __mm_populate(addr, len, 1);
3398}
3399#else
3400static inline void mm_populate(unsigned long addr, unsigned long len) {}
3401#endif
3402
3403/* This takes the mm semaphore itself */
3404extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
3405extern int vm_munmap(unsigned long, size_t);
3406extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
3407        unsigned long, unsigned long,
3408        unsigned long, unsigned long);
3409
3410struct vm_unmapped_area_info {
3411#define VM_UNMAPPED_AREA_TOPDOWN 1
3412	unsigned long flags;
3413	unsigned long length;
3414	unsigned long low_limit;
3415	unsigned long high_limit;
3416	unsigned long align_mask;
3417	unsigned long align_offset;
3418	unsigned long start_gap;
3419};
3420
3421extern unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info);
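
/*
 * Sketch of how an arch get_unmapped_area() implementation might fill the
 * descriptor above for a simple bottom-up search (limits are illustrative):
 *
 *	struct vm_unmapped_area_info info = {};
 *
 *	info.length = len;
 *	info.low_limit = mm->mmap_base;
 *	info.high_limit = TASK_SIZE;
 *	return vm_unmapped_area(&info);
 */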
3422
3423/* truncate.c */
3424extern void truncate_inode_pages(struct address_space *, loff_t);
3425extern void truncate_inode_pages_range(struct address_space *,
3426				       loff_t lstart, loff_t lend);
3427extern void truncate_inode_pages_final(struct address_space *);
3428
3429/* generic vm_area_ops exported for stackable file systems */
3430extern vm_fault_t filemap_fault(struct vm_fault *vmf);
3431extern vm_fault_t filemap_map_pages(struct vm_fault *vmf,
3432		pgoff_t start_pgoff, pgoff_t end_pgoff);
3433extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
3434
3435extern unsigned long stack_guard_gap;
3436/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
3437int expand_stack_locked(struct vm_area_struct *vma, unsigned long address);
3438struct vm_area_struct *expand_stack(struct mm_struct * mm, unsigned long addr);
3439
3440/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
3441int expand_downwards(struct vm_area_struct *vma, unsigned long address);
3442
3443/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
3444extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
3445extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
3446					     struct vm_area_struct **pprev);
3447
3448/*
3449 * Look up the first VMA which intersects the interval [start_addr, end_addr);
3450 * NULL if none.  Assumes start_addr < end_addr.
3451 */
3452struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
3453			unsigned long start_addr, unsigned long end_addr);
3454
3455/**
3456 * vma_lookup() - Find a VMA at a specific address
3457 * @mm: The process address space.
3458 * @addr: The user address.
3459 *
3460 * Return: The vm_area_struct at the given address, %NULL otherwise.
3461 */
3462static inline
3463struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
3464{
3465	return mtree_load(&mm->mm_mt, addr);
3466}
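
/*
 * Illustrative use of vma_lookup(); unlike find_vma(), it returns a VMA only
 * if @addr actually falls inside it. The mmap lock must be held:
 *
 *	mmap_read_lock(mm);
 *	vma = vma_lookup(mm, addr);
 *	if (vma)
 *		... addr is within [vma->vm_start, vma->vm_end) ...
 *	mmap_read_unlock(mm);
 */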
3467
3468static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
3469{
3470	if (vma->vm_flags & VM_GROWSDOWN)
3471		return stack_guard_gap;
3472
3473	/* See reasoning around the VM_SHADOW_STACK definition */
3474	if (vma->vm_flags & VM_SHADOW_STACK)
3475		return PAGE_SIZE;
3476
3477	return 0;
3478}
3479
3480static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
3481{
3482	unsigned long gap = stack_guard_start_gap(vma);
3483	unsigned long vm_start = vma->vm_start;
3484
3485	vm_start -= gap;
3486	if (vm_start > vma->vm_start)
3487		vm_start = 0;
3488	return vm_start;
3489}
3490
3491static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
3492{
3493	unsigned long vm_end = vma->vm_end;
3494
3495	if (vma->vm_flags & VM_GROWSUP) {
3496		vm_end += stack_guard_gap;
3497		if (vm_end < vma->vm_end)
3498			vm_end = -PAGE_SIZE;
3499	}
3500	return vm_end;
3501}
3502
3503static inline unsigned long vma_pages(struct vm_area_struct *vma)
3504{
3505	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
3506}
3507
3508/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
3509static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
3510				unsigned long vm_start, unsigned long vm_end)
3511{
3512	struct vm_area_struct *vma = vma_lookup(mm, vm_start);
3513
3514	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
3515		vma = NULL;
3516
3517	return vma;
3518}
3519
3520static inline bool range_in_vma(struct vm_area_struct *vma,
3521				unsigned long start, unsigned long end)
3522{
3523	return (vma && vma->vm_start <= start && end <= vma->vm_end);
3524}
3525
3526#ifdef CONFIG_MMU
3527pgprot_t vm_get_page_prot(unsigned long vm_flags);
3528void vma_set_page_prot(struct vm_area_struct *vma);
3529#else
3530static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
3531{
3532	return __pgprot(0);
3533}
3534static inline void vma_set_page_prot(struct vm_area_struct *vma)
3535{
3536	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3537}
3538#endif
3539
3540void vma_set_file(struct vm_area_struct *vma, struct file *file);
3541
3542#ifdef CONFIG_NUMA_BALANCING
3543unsigned long change_prot_numa(struct vm_area_struct *vma,
3544			unsigned long start, unsigned long end);
3545#endif
3546
3547struct vm_area_struct *find_extend_vma_locked(struct mm_struct *,
3548		unsigned long addr);
3549int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
3550			unsigned long pfn, unsigned long size, pgprot_t);
3551int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long addr,
3552		unsigned long pfn, unsigned long size, pgprot_t prot);
3553int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
3554int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
3555			struct page **pages, unsigned long *num);
3556int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
3557				unsigned long num);
3558int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
3559				unsigned long num);
3560vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
3561			unsigned long pfn);
3562vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
3563			unsigned long pfn, pgprot_t pgprot);
3564vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
3565			pfn_t pfn);
3566vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
3567		unsigned long addr, pfn_t pfn);
3568int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
3569
3570static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
3571				unsigned long addr, struct page *page)
3572{
3573	int err = vm_insert_page(vma, addr, page);
3574
3575	if (err == -ENOMEM)
3576		return VM_FAULT_OOM;
3577	if (err < 0 && err != -EBUSY)
3578		return VM_FAULT_SIGBUS;
3579
3580	return VM_FAULT_NOPAGE;
3581}
3582
3583#ifndef io_remap_pfn_range
3584static inline int io_remap_pfn_range(struct vm_area_struct *vma,
3585				     unsigned long addr, unsigned long pfn,
3586				     unsigned long size, pgprot_t prot)
3587{
3588	return remap_pfn_range(vma, addr, pfn, size, pgprot_decrypted(prot));
3589}
3590#endif
3591
3592static inline vm_fault_t vmf_error(int err)
3593{
3594	if (err == -ENOMEM)
3595		return VM_FAULT_OOM;
3596	else if (err == -EHWPOISON)
3597		return VM_FAULT_HWPOISON;
3598	return VM_FAULT_SIGBUS;
3599}
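
/*
 * Sketch of a driver ->fault handler using vmf_error() to translate an errno
 * into a vm_fault_t (my_lookup_page() is a made-up placeholder):
 *
 *	static vm_fault_t my_fault(struct vm_fault *vmf)
 *	{
 *		struct page *page = my_lookup_page(vmf->pgoff);
 *
 *		if (IS_ERR(page))
 *			return vmf_error(PTR_ERR(page));
 *		vmf->page = page;
 *		return 0;
 *	}
 */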
3600
3601/*
3602 * Convert errno to return value for ->page_mkwrite() calls.
3603 *
3604 * This should eventually be merged with vmf_error() above, but will need a
3605 * careful audit of all vmf_error() callers.
3606 */
3607static inline vm_fault_t vmf_fs_error(int err)
3608{
3609	if (err == 0)
3610		return VM_FAULT_LOCKED;
3611	if (err == -EFAULT || err == -EAGAIN)
3612		return VM_FAULT_NOPAGE;
3613	if (err == -ENOMEM)
3614		return VM_FAULT_OOM;
3615	/* -ENOSPC, -EDQUOT, -EIO ... */
3616	return VM_FAULT_SIGBUS;
3617}
3618
3619static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
3620{
3621	if (vm_fault & VM_FAULT_OOM)
3622		return -ENOMEM;
3623	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
3624		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
3625	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
3626		return -EFAULT;
3627	return 0;
3628}
3629
3630/*
3631 * Indicates whether GUP can follow a PROT_NONE mapped page, or whether
3632 * a (NUMA hinting) fault is required.
3633 */
3634static inline bool gup_can_follow_protnone(struct vm_area_struct *vma,
3635					   unsigned int flags)
3636{
3637	/*
3638	 * If callers don't want to honor NUMA hinting faults, no need to
3639	 * determine if we would actually have to trigger a NUMA hinting fault.
3640	 */
3641	if (!(flags & FOLL_HONOR_NUMA_FAULT))
3642		return true;
3643
3644	/*
3645	 * NUMA hinting faults don't apply in inaccessible (PROT_NONE) VMAs.
3646	 *
3647	 * Requiring a fault here even for inaccessible VMAs would mean that
3648	 * FOLL_FORCE cannot make any progress, because handle_mm_fault()
3649	 * refuses to process NUMA hinting faults in inaccessible VMAs.
3650	 */
3651	return !vma_is_accessible(vma);
3652}
3653
3654typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
3655extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
3656			       unsigned long size, pte_fn_t fn, void *data);
3657extern int apply_to_existing_page_range(struct mm_struct *mm,
3658				   unsigned long address, unsigned long size,
3659				   pte_fn_t fn, void *data);
3660
3661#ifdef CONFIG_PAGE_POISONING
3662extern void __kernel_poison_pages(struct page *page, int numpages);
3663extern void __kernel_unpoison_pages(struct page *page, int numpages);
3664extern bool _page_poisoning_enabled_early;
3665DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);
3666static inline bool page_poisoning_enabled(void)
3667{
3668	return _page_poisoning_enabled_early;
3669}
3670/*
3671 * For use in fast paths after init_mem_debugging() has run, or when a
3672 * false negative result is not harmful when called too early.
3673 */
3674static inline bool page_poisoning_enabled_static(void)
3675{
3676	return static_branch_unlikely(&_page_poisoning_enabled);
3677}
3678static inline void kernel_poison_pages(struct page *page, int numpages)
3679{
3680	if (page_poisoning_enabled_static())
3681		__kernel_poison_pages(page, numpages);
3682}
3683static inline void kernel_unpoison_pages(struct page *page, int numpages)
3684{
3685	if (page_poisoning_enabled_static())
3686		__kernel_unpoison_pages(page, numpages);
3687}
3688#else
3689static inline bool page_poisoning_enabled(void) { return false; }
3690static inline bool page_poisoning_enabled_static(void) { return false; }
3691static inline void __kernel_poison_pages(struct page *page, int numpages) { }
3692static inline void kernel_poison_pages(struct page *page, int numpages) { }
3693static inline void kernel_unpoison_pages(struct page *page, int numpages) { }
3694#endif
3695
3696DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc);
3697static inline bool want_init_on_alloc(gfp_t flags)
3698{
3699	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
3700				&init_on_alloc))
3701		return true;
3702	return flags & __GFP_ZERO;
3703}
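
/*
 * Sketch of how an allocation path consults the helper above (obj, size and
 * gfp_flags are illustrative):
 *
 *	if (want_init_on_alloc(gfp_flags))
 *		memset(obj, 0, size);
 */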
3704
3705DECLARE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free);
3706static inline bool want_init_on_free(void)
3707{
3708	return static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
3709				   &init_on_free);
3710}
3711
3712extern bool _debug_pagealloc_enabled_early;
3713DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
3714
3715static inline bool debug_pagealloc_enabled(void)
3716{
3717	return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
3718		_debug_pagealloc_enabled_early;
3719}
3720
3721/*
3722 * For use in fast paths after mem_debugging_and_hardening_init() has run,
3723 * or when a false negative result is not harmful when called too early.
3724 */
3725static inline bool debug_pagealloc_enabled_static(void)
3726{
3727	if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
3728		return false;
3729
3730	return static_branch_unlikely(&_debug_pagealloc_enabled);
3731}
3732
3733/*
3734 * To support DEBUG_PAGEALLOC, an architecture must ensure that
3735 * __kernel_map_pages() never fails.
3736 */
3737extern void __kernel_map_pages(struct page *page, int numpages, int enable);
3738#ifdef CONFIG_DEBUG_PAGEALLOC
3739static inline void debug_pagealloc_map_pages(struct page *page, int numpages)
3740{
3741	if (debug_pagealloc_enabled_static())
3742		__kernel_map_pages(page, numpages, 1);
3743}
3744
3745static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages)
3746{
3747	if (debug_pagealloc_enabled_static())
3748		__kernel_map_pages(page, numpages, 0);
3749}
3750
3751extern unsigned int _debug_guardpage_minorder;
3752DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
3753
3754static inline unsigned int debug_guardpage_minorder(void)
3755{
3756	return _debug_guardpage_minorder;
3757}
3758
3759static inline bool debug_guardpage_enabled(void)
3760{
3761	return static_branch_unlikely(&_debug_guardpage_enabled);
3762}
3763
3764static inline bool page_is_guard(struct page *page)
3765{
3766	if (!debug_guardpage_enabled())
3767		return false;
3768
3769	return PageGuard(page);
3770}
3771
3772bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order);
3773static inline bool set_page_guard(struct zone *zone, struct page *page,
3774				  unsigned int order)
3775{
3776	if (!debug_guardpage_enabled())
3777		return false;
3778	return __set_page_guard(zone, page, order);
3779}
3780
3781void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order);
3782static inline void clear_page_guard(struct zone *zone, struct page *page,
3783				    unsigned int order)
3784{
3785	if (!debug_guardpage_enabled())
3786		return;
3787	__clear_page_guard(zone, page, order);
3788}
3789
3790#else	/* CONFIG_DEBUG_PAGEALLOC */
3791static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {}
3792static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {}
3793static inline unsigned int debug_guardpage_minorder(void) { return 0; }
3794static inline bool debug_guardpage_enabled(void) { return false; }
3795static inline bool page_is_guard(struct page *page) { return false; }
3796static inline bool set_page_guard(struct zone *zone, struct page *page,
3797			unsigned int order) { return false; }
3798static inline void clear_page_guard(struct zone *zone, struct page *page,
3799				unsigned int order) {}
3800#endif	/* CONFIG_DEBUG_PAGEALLOC */
3801
3802#ifdef __HAVE_ARCH_GATE_AREA
3803extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
3804extern int in_gate_area_no_mm(unsigned long addr);
3805extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
3806#else
3807static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
3808{
3809	return NULL;
3810}
3811static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
3812static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
3813{
3814	return 0;
3815}
3816#endif	/* __HAVE_ARCH_GATE_AREA */
3817
3818extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
3819
3820#ifdef CONFIG_SYSCTL
3821extern int sysctl_drop_caches;
3822int drop_caches_sysctl_handler(const struct ctl_table *, int, void *, size_t *,
3823		loff_t *);
3824#endif
3825
3826void drop_slab(void);
3827
3828#ifndef CONFIG_MMU
3829#define randomize_va_space 0
3830#else
3831extern int randomize_va_space;
3832#endif
3833
3834const char * arch_vma_name(struct vm_area_struct *vma);
3835#ifdef CONFIG_MMU
3836void print_vma_addr(char *prefix, unsigned long rip);
3837#else
3838static inline void print_vma_addr(char *prefix, unsigned long rip)
3839{
3840}
3841#endif
3842
3843void *sparse_buffer_alloc(unsigned long size);
3844struct page * __populate_section_memmap(unsigned long pfn,
3845		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
3846		struct dev_pagemap *pgmap);
3847pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
3848p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
3849pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
3850pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
3851pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node,
3852			    struct vmem_altmap *altmap, struct page *reuse);
3853void *vmemmap_alloc_block(unsigned long size, int node);
3854struct vmem_altmap;
3855void *vmemmap_alloc_block_buf(unsigned long size, int node,
3856			      struct vmem_altmap *altmap);
3857void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
3858void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
3859		     unsigned long addr, unsigned long next);
3860int vmemmap_check_pmd(pmd_t *pmd, int node,
3861		      unsigned long addr, unsigned long next);
3862int vmemmap_populate_basepages(unsigned long start, unsigned long end,
3863			       int node, struct vmem_altmap *altmap);
3864int vmemmap_populate_hugepages(unsigned long start, unsigned long end,
3865			       int node, struct vmem_altmap *altmap);
3866int vmemmap_populate(unsigned long start, unsigned long end, int node,
3867		struct vmem_altmap *altmap);
3868void vmemmap_populate_print_last(void);
3869#ifdef CONFIG_MEMORY_HOTPLUG
3870void vmemmap_free(unsigned long start, unsigned long end,
3871		struct vmem_altmap *altmap);
3872#endif
3873
3874#ifdef CONFIG_SPARSEMEM_VMEMMAP
3875static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3876{
3877	/* number of pfns from base where pfn_to_page() is valid */
3878	if (altmap)
3879		return altmap->reserve + altmap->free;
3880	return 0;
3881}
3882
3883static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3884				    unsigned long nr_pfns)
3885{
3886	altmap->alloc -= nr_pfns;
3887}
3888#else
3889static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap)
3890{
3891	return 0;
3892}
3893
3894static inline void vmem_altmap_free(struct vmem_altmap *altmap,
3895				    unsigned long nr_pfns)
3896{
3897}
3898#endif
3899
3900#define VMEMMAP_RESERVE_NR	2
3901#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
3902static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
3903					  struct dev_pagemap *pgmap)
3904{
3905	unsigned long nr_pages;
3906	unsigned long nr_vmemmap_pages;
3907
3908	if (!pgmap || !is_power_of_2(sizeof(struct page)))
3909		return false;
3910
3911	nr_pages = pgmap_vmemmap_nr(pgmap);
3912	nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
3913	/*
3914	 * For vmemmap optimization with DAX we need a minimum of 2 vmemmap
3915	 * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
3916	 */
3917	return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
3918}
3919/*
3920 * If we don't have an architecture override, use the generic rule
3921 */
3922#ifndef vmemmap_can_optimize
3923#define vmemmap_can_optimize __vmemmap_can_optimize
3924#endif
3925
3926#else
3927static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
3928					   struct dev_pagemap *pgmap)
3929{
3930	return false;
3931}
3932#endif
3933
3934void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
3935				  unsigned long nr_pages);
3936
3937enum mf_flags {
3938	MF_COUNT_INCREASED = 1 << 0,
3939	MF_ACTION_REQUIRED = 1 << 1,
3940	MF_MUST_KILL = 1 << 2,
3941	MF_SOFT_OFFLINE = 1 << 3,
3942	MF_UNPOISON = 1 << 4,
3943	MF_SW_SIMULATED = 1 << 5,
3944	MF_NO_RETRY = 1 << 6,
3945	MF_MEM_PRE_REMOVE = 1 << 7,
3946};
3947int mf_dax_kill_procs(struct address_space *mapping, pgoff_t index,
3948		      unsigned long count, int mf_flags);
3949extern int memory_failure(unsigned long pfn, int flags);
3950extern void memory_failure_queue_kick(int cpu);
3951extern int unpoison_memory(unsigned long pfn);
3952extern atomic_long_t num_poisoned_pages __read_mostly;
3953extern int soft_offline_page(unsigned long pfn, int flags);
3954#ifdef CONFIG_MEMORY_FAILURE
3955/*
3956 * Sysfs entries for memory failure handling statistics.
3957 */
3958extern const struct attribute_group memory_failure_attr_group;
3959extern void memory_failure_queue(unsigned long pfn, int flags);
3960extern int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3961					bool *migratable_cleared);
3962void num_poisoned_pages_inc(unsigned long pfn);
3963void num_poisoned_pages_sub(unsigned long pfn, long i);
3964#else
3965static inline void memory_failure_queue(unsigned long pfn, int flags)
3966{
3967}
3968
3969static inline int __get_huge_page_for_hwpoison(unsigned long pfn, int flags,
3970					bool *migratable_cleared)
3971{
3972	return 0;
3973}
3974
3975static inline void num_poisoned_pages_inc(unsigned long pfn)
3976{
3977}
3978
3979static inline void num_poisoned_pages_sub(unsigned long pfn, long i)
3980{
3981}
3982#endif
3983
3984#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
3985extern void memblk_nr_poison_inc(unsigned long pfn);
3986extern void memblk_nr_poison_sub(unsigned long pfn, long i);
3987#else
3988static inline void memblk_nr_poison_inc(unsigned long pfn)
3989{
3990}
3991
3992static inline void memblk_nr_poison_sub(unsigned long pfn, long i)
3993{
3994}
3995#endif
3996
3997#ifndef arch_memory_failure
3998static inline int arch_memory_failure(unsigned long pfn, int flags)
3999{
4000	return -ENXIO;
4001}
4002#endif
4003
4004#ifndef arch_is_platform_page
4005static inline bool arch_is_platform_page(u64 paddr)
4006{
4007	return false;
4008}
4009#endif
4010
4011/*
4012 * Error handlers for various types of pages.
4013 */
4014enum mf_result {
4015	MF_IGNORED,	/* Error: cannot be handled */
4016	MF_FAILED,	/* Error: handling failed */
4017	MF_DELAYED,	/* Will be handled later */
4018	MF_RECOVERED,	/* Successfully recovered */
4019};
4020
4021enum mf_action_page_type {
4022	MF_MSG_KERNEL,
4023	MF_MSG_KERNEL_HIGH_ORDER,
4024	MF_MSG_DIFFERENT_COMPOUND,
4025	MF_MSG_HUGE,
4026	MF_MSG_FREE_HUGE,
4027	MF_MSG_GET_HWPOISON,
4028	MF_MSG_UNMAP_FAILED,
4029	MF_MSG_DIRTY_SWAPCACHE,
4030	MF_MSG_CLEAN_SWAPCACHE,
4031	MF_MSG_DIRTY_MLOCKED_LRU,
4032	MF_MSG_CLEAN_MLOCKED_LRU,
4033	MF_MSG_DIRTY_UNEVICTABLE_LRU,
4034	MF_MSG_CLEAN_UNEVICTABLE_LRU,
4035	MF_MSG_DIRTY_LRU,
4036	MF_MSG_CLEAN_LRU,
4037	MF_MSG_TRUNCATED_LRU,
4038	MF_MSG_BUDDY,
4039	MF_MSG_DAX,
4040	MF_MSG_UNSPLIT_THP,
4041	MF_MSG_ALREADY_POISONED,
4042	MF_MSG_UNKNOWN,
4043};
4044
4045#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
4046void folio_zero_user(struct folio *folio, unsigned long addr_hint);
4047int copy_user_large_folio(struct folio *dst, struct folio *src,
4048			  unsigned long addr_hint,
4049			  struct vm_area_struct *vma);
4050long copy_folio_from_user(struct folio *dst_folio,
4051			   const void __user *usr_src,
4052			   bool allow_pagefault);
4053
4054/**
4055 * vma_is_special_huge - Are transhuge page-table entries considered special?
4056 * @vma: Pointer to the struct vm_area_struct to consider
4057 *
4058 * Whether transhuge page-table entries are considered "special" following
4059 * the definition in vm_normal_page().
4060 *
4061 * Return: true if transhuge page-table entries should be considered special,
4062 * false otherwise.
4063 */
4064static inline bool vma_is_special_huge(const struct vm_area_struct *vma)
4065{
4066	return vma_is_dax(vma) || (vma->vm_file &&
4067				   (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
4068}
4069
4070#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
4071
4072#if MAX_NUMNODES > 1
4073void __init setup_nr_node_ids(void);
4074#else
4075static inline void setup_nr_node_ids(void) {}
4076#endif
4077
4078extern int memcmp_pages(struct page *page1, struct page *page2);
4079
4080static inline int pages_identical(struct page *page1, struct page *page2)
4081{
4082	return !memcmp_pages(page1, page2);
4083}
4084
4085#ifdef CONFIG_MAPPING_DIRTY_HELPERS
4086unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
4087						pgoff_t first_index, pgoff_t nr,
4088						pgoff_t bitmap_pgoff,
4089						unsigned long *bitmap,
4090						pgoff_t *start,
4091						pgoff_t *end);
4092
4093unsigned long wp_shared_mapping_range(struct address_space *mapping,
4094				      pgoff_t first_index, pgoff_t nr);
4095#endif
4096
4097extern int sysctl_nr_trim_pages;
4098
4099#ifdef CONFIG_PRINTK
4100void mem_dump_obj(void *object);
4101#else
4102static inline void mem_dump_obj(void *object) {}
4103#endif
4104
4105static inline bool is_write_sealed(int seals)
4106{
4107	return seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE);
4108}
4109
4110/**
4111 * is_readonly_sealed - Checks whether the mapping is write-sealed but mapped
4112 *                      read-only, in which case writes should be disallowed
4113 *                      going forward.
4114 * @seals: the seals to check
4115 * @vm_flags: the VMA flags to check
4116 *
4117 * Returns whether readonly sealed, in which case writes should be disallowed
4118 * going forward.
4119 */
4120static inline bool is_readonly_sealed(int seals, vm_flags_t vm_flags)
4121{
4122	/*
4123	 * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
4124	 * MAP_SHARED and read-only, take care to not allow mprotect to
4125	 * revert protections on such mappings. Do this only for shared
4126 * mappings. For private mappings, we don't need to mask
4127	 * VM_MAYWRITE as we still want them to be COW-writable.
4128	 */
4129	if (is_write_sealed(seals) &&
4130	    ((vm_flags & (VM_SHARED | VM_WRITE)) == VM_SHARED))
4131		return true;
4132
4133	return false;
4134}
4135
4136/**
4137 * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
4138 *                    handle them.
4139 * @seals: the seals to check
4140 * @vma: the vma to operate on
4141 *
4142 * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
4143 * check/handling on the vma flags.  Return 0 if the check passes, or <0 on error.
4144 */
4145static inline int seal_check_write(int seals, struct vm_area_struct *vma)
4146{
4147	if (!is_write_sealed(seals))
4148		return 0;
4149
4150	/*
4151	 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
4152	 * write seals are active.
4153	 */
4154	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
4155		return -EPERM;
4156
4157	return 0;
4158}
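
/*
 * Sketch of how a memfd-style ->mmap() implementation consults the helper
 * above ("seals" stands in for wherever the backing object stores them):
 *
 *	int ret = seal_check_write(seals, vma);
 *
 *	if (ret)
 *		return ret;
 */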
4159
4160#ifdef CONFIG_ANON_VMA_NAME
4161int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4162			  unsigned long len_in,
4163			  struct anon_vma_name *anon_name);
4164#else
4165static inline int
4166madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
4167		      unsigned long len_in, struct anon_vma_name *anon_name) {
4168	return 0;
4169}
4170#endif
4171
4172#ifdef CONFIG_UNACCEPTED_MEMORY
4173
4174bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size);
4175void accept_memory(phys_addr_t start, unsigned long size);
4176
4177#else
4178
4179static inline bool range_contains_unaccepted_memory(phys_addr_t start,
4180						    unsigned long size)
4181{
4182	return false;
4183}
4184
4185static inline void accept_memory(phys_addr_t start, unsigned long size)
4186{
4187}
4188
4189#endif
4190
4191static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
4192{
4193	return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE);
4194}
4195
4196void vma_pgtable_walk_begin(struct vm_area_struct *vma);
4197void vma_pgtable_walk_end(struct vm_area_struct *vma);
4198
4199int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size);
4200
4201#ifdef CONFIG_64BIT
4202int do_mseal(unsigned long start, size_t len_in, unsigned long flags);
4203#else
4204static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags)
4205{
4206	/* noop on 32 bit */
4207	return 0;
4208}
4209#endif
4210
4211/*
4212 * user_alloc_needs_zeroing checks if a user folio from page allocator needs to
4213 * be zeroed or not.
4214 */
4215static inline bool user_alloc_needs_zeroing(void)
4216{
4217	/*
4218	 * For user folios, an arch with cache aliasing requires a cache flush,
4219	 * and ARC changes folio->flags to make the icache coherent with the
4220	 * dcache, so always return false to make the caller use
4221	 * clear_user_page()/clear_user_highpage().
4222	 */
4223	return cpu_dcache_is_aliasing() || cpu_icache_is_aliasing() ||
4224	       !static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
4225				   &init_on_alloc);
4226}
4227
4228int arch_get_shadow_stack_status(struct task_struct *t, unsigned long __user *status);
4229int arch_set_shadow_stack_status(struct task_struct *t, unsigned long status);
4230int arch_lock_shadow_stack_status(struct task_struct *t, unsigned long status);
4231
4232#endif /* _LINUX_MM_H */