   1#ifndef _LINUX_MM_H
   2#define _LINUX_MM_H
   3
   4#include <linux/errno.h>
   5
   6#ifdef __KERNEL__
   7
   8#include <linux/mmdebug.h>
   9#include <linux/gfp.h>
  10#include <linux/bug.h>
  11#include <linux/list.h>
  12#include <linux/mmzone.h>
  13#include <linux/rbtree.h>
  14#include <linux/atomic.h>
  15#include <linux/debug_locks.h>
  16#include <linux/mm_types.h>
  17#include <linux/range.h>
  18#include <linux/pfn.h>
  19#include <linux/bit_spinlock.h>
  20#include <linux/shrinker.h>
  21
  22struct mempolicy;
  23struct anon_vma;
  24struct anon_vma_chain;
  25struct file_ra_state;
  26struct user_struct;
  27struct writeback_control;
  28
  29#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
  30extern unsigned long max_mapnr;
  31
  32static inline void set_max_mapnr(unsigned long limit)
  33{
  34	max_mapnr = limit;
  35}
  36#else
  37static inline void set_max_mapnr(unsigned long limit) { }
  38#endif
  39
  40extern unsigned long totalram_pages;
  41extern void * high_memory;
  42extern int page_cluster;
  43
  44#ifdef CONFIG_SYSCTL
  45extern int sysctl_legacy_va_layout;
  46#else
  47#define sysctl_legacy_va_layout 0
  48#endif
  49
  50#include <asm/page.h>
  51#include <asm/pgtable.h>
  52#include <asm/processor.h>
  53
  54#ifndef __pa_symbol
  55#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
  56#endif
  57
  58extern unsigned long sysctl_user_reserve_kbytes;
  59extern unsigned long sysctl_admin_reserve_kbytes;
  60
  61extern int sysctl_overcommit_memory;
  62extern int sysctl_overcommit_ratio;
  63extern unsigned long sysctl_overcommit_kbytes;
  64
  65extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
  66				    size_t *, loff_t *);
  67extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
  68				    size_t *, loff_t *);
  69
  70#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
  71
  72/* to align the pointer to the (next) page boundary */
  73#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
  74
  75/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
  76#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
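/*
 * Example (an illustrative sketch, not a definition from this file):
 * computing the span of whole pages covering a kernel buffer "buf" of
 * "len" bytes that may start mid-page.  The variable names are made up.
 *
 *	unsigned long start  = (unsigned long)buf & PAGE_MASK;
 *	unsigned long end    = PAGE_ALIGN((unsigned long)buf + len);
 *	unsigned long npages = (end - start) >> PAGE_SHIFT;
 *
 * The i-th page of a contiguous range starting at "page" is then
 * nth_page(page, i), and PAGE_ALIGNED(start) is always true here.
 */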
  77
  78/*
  79 * Linux kernel virtual memory manager primitives.
  80 * The idea being to have a "virtual" mm in the same way
  81 * we have a virtual fs - giving a cleaner interface to the
  82 * mm details, and allowing different kinds of memory mappings
  83 * (from shared memory to executable loading to arbitrary
  84 * mmap() functions).
  85 */
  86
  87extern struct kmem_cache *vm_area_cachep;
  88
  89#ifndef CONFIG_MMU
  90extern struct rb_root nommu_region_tree;
  91extern struct rw_semaphore nommu_region_sem;
  92
  93extern unsigned int kobjsize(const void *objp);
  94#endif
  95
  96/*
  97 * vm_flags in vm_area_struct, see mm_types.h.
  98 */
  99#define VM_NONE		0x00000000
 100
 101#define VM_READ		0x00000001	/* currently active flags */
 102#define VM_WRITE	0x00000002
 103#define VM_EXEC		0x00000004
 104#define VM_SHARED	0x00000008
 105
 106/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
 107#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
 108#define VM_MAYWRITE	0x00000020
 109#define VM_MAYEXEC	0x00000040
 110#define VM_MAYSHARE	0x00000080
 111
 112#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 113#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 114#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 115
 116#define VM_LOCKED	0x00002000
 117#define VM_IO           0x00004000	/* Memory mapped I/O or similar */
 118
 119					/* Used by sys_madvise() */
 120#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
 121#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
 122
 123#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
 124#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 125#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 126#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 127#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 128#define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 129#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
 130#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
 131
 132#ifdef CONFIG_MEM_SOFT_DIRTY
 133# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
 134#else
 135# define VM_SOFTDIRTY	0
 136#endif
 137
 138#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 139#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 140#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
 141#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 142
 143#if defined(CONFIG_X86)
 144# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
 145#elif defined(CONFIG_PPC)
 146# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 147#elif defined(CONFIG_PARISC)
 148# define VM_GROWSUP	VM_ARCH_1
 149#elif defined(CONFIG_METAG)
 150# define VM_GROWSUP	VM_ARCH_1
 151#elif defined(CONFIG_IA64)
 152# define VM_GROWSUP	VM_ARCH_1
 153#elif !defined(CONFIG_MMU)
 154# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 155#endif
 156
 157#ifndef VM_GROWSUP
 158# define VM_GROWSUP	VM_NONE
 159#endif
 160
 161/* Bits set in the VMA until the stack is in its final location */
 162#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
 163
 164#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 165#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 166#endif
 167
 168#ifdef CONFIG_STACK_GROWSUP
 169#define VM_STACK_FLAGS	(VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 170#else
 171#define VM_STACK_FLAGS	(VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 172#endif
 173
 174/*
 175 * Special vmas that are non-mergeable, non-mlock()able.
 176 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 177 */
 178#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 179
 180/* This mask defines which mm->def_flags a process can inherit from its parent */
 181#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
 182
 183/*
 184 * mapping from the currently active vm_flags protection bits (the
 185 * low four bits) to a page protection mask..
 186 */
 187extern pgprot_t protection_map[16];
 188
 189#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 190#define FAULT_FLAG_NONLINEAR	0x02	/* Fault was via a nonlinear mapping */
 191#define FAULT_FLAG_MKWRITE	0x04	/* Fault was mkwrite of existing pte */
 192#define FAULT_FLAG_ALLOW_RETRY	0x08	/* Retry fault if blocking */
 193#define FAULT_FLAG_RETRY_NOWAIT	0x10	/* Don't drop mmap_sem and wait when retrying */
 194#define FAULT_FLAG_KILLABLE	0x20	/* The fault task is in SIGKILL killable region */
 195#define FAULT_FLAG_TRIED	0x40	/* second try */
 196#define FAULT_FLAG_USER		0x80	/* The fault originated in userspace */
 197
 198/*
 199 * vm_fault is filled in by the pagefault handler and passed to the vma's
 200 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 201 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 202 *
 203 * pgoff should be used in favour of virtual_address, if possible. If pgoff
 204 * is used, one may implement ->remap_pages to get nonlinear mapping support.
 205 */
 206struct vm_fault {
 207	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 208	pgoff_t pgoff;			/* Logical page offset based on vma */
 209	void __user *virtual_address;	/* Faulting virtual address */
 210
 211	struct page *page;		/* ->fault handlers should return a
 212					 * page here, unless VM_FAULT_NOPAGE
 213					 * is set (which is also implied by
 214					 * VM_FAULT_ERROR).
 215					 */
 216	/* for ->map_pages() only */
 217	pgoff_t max_pgoff;		/* map pages for offset from pgoff till
 218					 * max_pgoff inclusive */
 219	pte_t *pte;			/* pte entry associated with ->pgoff */
 220};
 221
 222/*
 223 * These are the virtual MM functions - opening of an area, closing and
 224 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 225 * to the functions called when a no-page or a wp-page exception occurs. 
 226 */
 227struct vm_operations_struct {
 228	void (*open)(struct vm_area_struct * area);
 229	void (*close)(struct vm_area_struct * area);
 230	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
 231	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 232
 233	/* notification that a previously read-only page is about to become
 234	 * writable, if an error is returned it will cause a SIGBUS */
 235	int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
 236
 237	/* called by access_process_vm when get_user_pages() fails, typically
 238	 * for use by special VMAs that can switch between memory and hardware
 239	 */
 240	int (*access)(struct vm_area_struct *vma, unsigned long addr,
 241		      void *buf, int len, int write);
 242#ifdef CONFIG_NUMA
 243	/*
 244	 * set_policy() op must add a reference to any non-NULL @new mempolicy
 245	 * to hold the policy upon return.  Caller should pass NULL @new to
 246	 * remove a policy and fall back to surrounding context--i.e. do not
 247	 * install a MPOL_DEFAULT policy, nor the task or system default
 248	 * mempolicy.
 249	 */
 250	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 251
 252	/*
 253	 * get_policy() op must add reference [mpol_get()] to any policy at
 254	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
 255	 * in mm/mempolicy.c will do this automatically.
 256	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
 257	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
 258	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
 259	 * must return NULL--i.e., do not "fallback" to task or system default
 260	 * policy.
 261	 */
 262	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 263					unsigned long addr);
 264	int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
 265		const nodemask_t *to, unsigned long flags);
 266#endif
 267	/* called by sys_remap_file_pages() to populate non-linear mapping */
 268	int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
 269			   unsigned long size, pgoff_t pgoff);
 270};
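/*
 * Example (an illustrative sketch, not part of this interface): a driver
 * that backs a mapping with pages of its own "my_dev" object could wire
 * up ->fault roughly as below.  my_dev, my_dev_lookup_page() and
 * my_vm_ops are hypothetical names.
 *
 *	static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 *	{
 *		struct my_dev *dev = vma->vm_private_data;
 *		struct page *page = my_dev_lookup_page(dev, vmf->pgoff);
 *
 *		if (!page)
 *			return VM_FAULT_SIGBUS;
 *		get_page(page);
 *		vmf->page = page;
 *		return 0;
 *	}
 *
 *	static const struct vm_operations_struct my_vm_ops = {
 *		.fault = my_fault,
 *	};
 */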
 271
 272struct mmu_gather;
 273struct inode;
 274
 275#define page_private(page)		((page)->private)
 276#define set_page_private(page, v)	((page)->private = (v))
 277
 278/* It's valid only if the page is on the free path or on a free_list */
 279static inline void set_freepage_migratetype(struct page *page, int migratetype)
 280{
 281	page->index = migratetype;
 282}
 283
 284/* It's valid only if the page is on the free path or on a free_list */
 285static inline int get_freepage_migratetype(struct page *page)
 286{
 287	return page->index;
 288}
 289
 290/*
 291 * FIXME: take this include out, include page-flags.h in
 292 * files which need it (119 of them)
 293 */
 294#include <linux/page-flags.h>
 295#include <linux/huge_mm.h>
 296
 297/*
 298 * Methods to modify the page usage count.
 299 *
 300 * What counts for a page usage:
 301 * - cache mapping   (page->mapping)
 302 * - private data    (page->private)
 303 * - page mapped in a task's page tables, each mapping
 304 *   is counted separately
 305 *
 306 * Also, many kernel routines increase the page count before a critical
 307 * routine so they can be sure the page doesn't go away from under them.
 308 */
 309
 310/*
 311 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 312 */
 313static inline int put_page_testzero(struct page *page)
 314{
 315	VM_BUG_ON_PAGE(atomic_read(&page->_count) == 0, page);
 316	return atomic_dec_and_test(&page->_count);
 317}
 318
 319/*
 320 * Try to grab a ref unless the page has a refcount of zero, return false if
 321 * that is the case.
 322 * This can be called when MMU is off so it must not access
 323 * any of the virtual mappings.
 324 */
 325static inline int get_page_unless_zero(struct page *page)
 326{
 327	return atomic_inc_not_zero(&page->_count);
 328}
 329
 330/*
 331 * Try to drop a ref unless the page has a refcount of one, return false if
 332 * that is the case.
 333 * This is to make sure that the refcount won't become zero after this drop.
 334 * This can be called when MMU is off so it must not access
 335 * any of the virtual mappings.
 336 */
 337static inline int put_page_unless_one(struct page *page)
 338{
 339	return atomic_add_unless(&page->_count, -1, 1);
 340}
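/*
 * Example (sketch only): the usual lockless pattern built on
 * get_page_unless_zero() is a speculative lookup that tolerates the page
 * being freed concurrently.  "lookup" and "revalidate" stand in for
 * caller-specific steps; if either fails, the caller simply retries.
 *
 *	page = lookup(...);
 *	if (!get_page_unless_zero(page))
 *		goto retry;
 *	if (!revalidate(page)) {
 *		put_page(page);
 *		goto retry;
 *	}
 */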
 341
 342extern int page_is_ram(unsigned long pfn);
 343
 344/* Support for virtually mapped pages */
 345struct page *vmalloc_to_page(const void *addr);
 346unsigned long vmalloc_to_pfn(const void *addr);
 347
 348/*
 349 * Determine if an address is within the vmalloc range
 350 *
 351 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 352 * is no special casing required.
 353 */
 354static inline int is_vmalloc_addr(const void *x)
 355{
 356#ifdef CONFIG_MMU
 357	unsigned long addr = (unsigned long)x;
 358
 359	return addr >= VMALLOC_START && addr < VMALLOC_END;
 360#else
 361	return 0;
 362#endif
 363}
 364#ifdef CONFIG_MMU
 365extern int is_vmalloc_or_module_addr(const void *x);
 366#else
 367static inline int is_vmalloc_or_module_addr(const void *x)
 368{
 369	return 0;
 370}
 371#endif
 372
 373extern void kvfree(const void *addr);
 374
 375static inline void compound_lock(struct page *page)
 376{
 377#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 378	VM_BUG_ON_PAGE(PageSlab(page), page);
 379	bit_spin_lock(PG_compound_lock, &page->flags);
 380#endif
 381}
 382
 383static inline void compound_unlock(struct page *page)
 384{
 385#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 386	VM_BUG_ON_PAGE(PageSlab(page), page);
 387	bit_spin_unlock(PG_compound_lock, &page->flags);
 388#endif
 389}
 390
 391static inline unsigned long compound_lock_irqsave(struct page *page)
 392{
 393	unsigned long uninitialized_var(flags);
 394#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 395	local_irq_save(flags);
 396	compound_lock(page);
 397#endif
 398	return flags;
 399}
 400
 401static inline void compound_unlock_irqrestore(struct page *page,
 402					      unsigned long flags)
 403{
 404#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 405	compound_unlock(page);
 406	local_irq_restore(flags);
 407#endif
 408}
 409
 410static inline struct page *compound_head(struct page *page)
 411{
 412	if (unlikely(PageTail(page))) {
 413		struct page *head = page->first_page;
 414
 415		/*
 416		 * page->first_page may be a dangling pointer to an old
 417		 * compound page, so recheck that it is still a tail
 418		 * page before returning.
 419		 */
 420		smp_rmb();
 421		if (likely(PageTail(page)))
 422			return head;
 423	}
 424	return page;
 425}
 426
 427/*
 428 * The atomic page->_mapcount, starts from -1: so that transitions
 429 * both from it and to it can be tracked, using atomic_inc_and_test
 430 * and atomic_add_negative(-1).
 431 */
 432static inline void page_mapcount_reset(struct page *page)
 433{
 434	atomic_set(&(page)->_mapcount, -1);
 435}
 436
 437static inline int page_mapcount(struct page *page)
 438{
 439	return atomic_read(&(page)->_mapcount) + 1;
 440}
 441
 442static inline int page_count(struct page *page)
 443{
 444	return atomic_read(&compound_head(page)->_count);
 445}
 446
 447#ifdef CONFIG_HUGETLB_PAGE
 448extern int PageHeadHuge(struct page *page_head);
 449#else /* CONFIG_HUGETLB_PAGE */
 450static inline int PageHeadHuge(struct page *page_head)
 451{
 452	return 0;
 453}
 454#endif /* CONFIG_HUGETLB_PAGE */
 455
 456static inline bool __compound_tail_refcounted(struct page *page)
 457{
 458	return !PageSlab(page) && !PageHeadHuge(page);
 459}
 460
 461/*
 462 * This takes a head page as parameter and tells if the
 463 * tail page reference counting can be skipped.
 464 *
 465 * For this to be safe, PageSlab and PageHeadHuge must remain true on
 466 * any given page where they return true here, until all tail pins
 467 * have been released.
 468 */
 469static inline bool compound_tail_refcounted(struct page *page)
 470{
 471	VM_BUG_ON_PAGE(!PageHead(page), page);
 472	return __compound_tail_refcounted(page);
 473}
 474
 475static inline void get_huge_page_tail(struct page *page)
 476{
 477	/*
 478	 * __split_huge_page_refcount() cannot run from under us.
 479	 */
 480	VM_BUG_ON_PAGE(!PageTail(page), page);
 481	VM_BUG_ON_PAGE(page_mapcount(page) < 0, page);
 482	VM_BUG_ON_PAGE(atomic_read(&page->_count) != 0, page);
 483	if (compound_tail_refcounted(page->first_page))
 484		atomic_inc(&page->_mapcount);
 485}
 486
 487extern bool __get_page_tail(struct page *page);
 488
 489static inline void get_page(struct page *page)
 490{
 491	if (unlikely(PageTail(page)))
 492		if (likely(__get_page_tail(page)))
 493			return;
 494	/*
 495	 * Getting a normal page or the head of a compound page
 496	 * requires to already have an elevated page->_count.
 497	 */
 498	VM_BUG_ON_PAGE(atomic_read(&page->_count) <= 0, page);
 499	atomic_inc(&page->_count);
 500}
 501
 502static inline struct page *virt_to_head_page(const void *x)
 503{
 504	struct page *page = virt_to_page(x);
 505	return compound_head(page);
 506}
 507
 508/*
 509 * Setup the page count before being freed into the page allocator for
 510 * the first time (boot or memory hotplug)
 511 */
 512static inline void init_page_count(struct page *page)
 513{
 514	atomic_set(&page->_count, 1);
 515}
 516
 517/*
 518 * PageBuddy() indicates that the page is free and in the buddy system
 519 * (see mm/page_alloc.c).
 520 *
 521 * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
 522 * -2 so that an underflow of the page_mapcount() won't be mistaken
 523 * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
 524 * efficiently by most CPU architectures.
 525 */
 526#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
 527
 528static inline int PageBuddy(struct page *page)
 529{
 530	return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
 531}
 532
 533static inline void __SetPageBuddy(struct page *page)
 534{
 535	VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page);
 536	atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
 537}
 538
 539static inline void __ClearPageBuddy(struct page *page)
 540{
 541	VM_BUG_ON_PAGE(!PageBuddy(page), page);
 542	atomic_set(&page->_mapcount, -1);
 543}
 544
 545void put_page(struct page *page);
 546void put_pages_list(struct list_head *pages);
 547
 548void split_page(struct page *page, unsigned int order);
 549int split_free_page(struct page *page);
 550
 551/*
 552 * Compound pages have a destructor function.  Provide a
 553 * prototype for that function and accessor functions.
 554 * These are _only_ valid on the head of a PG_compound page.
 555 */
 556typedef void compound_page_dtor(struct page *);
 557
 558static inline void set_compound_page_dtor(struct page *page,
 559						compound_page_dtor *dtor)
 560{
 561	page[1].lru.next = (void *)dtor;
 562}
 563
 564static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 565{
 566	return (compound_page_dtor *)page[1].lru.next;
 567}
 568
 569static inline int compound_order(struct page *page)
 570{
 571	if (!PageHead(page))
 572		return 0;
 573	return (unsigned long)page[1].lru.prev;
 574}
 575
 576static inline void set_compound_order(struct page *page, unsigned long order)
 577{
 578	page[1].lru.prev = (void *)order;
 579}
 580
 581#ifdef CONFIG_MMU
 582/*
 583 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 584 * servicing faults for write access.  In the normal case, we always want
 585 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 586 * that do not have writing enabled, when used by access_process_vm.
 587 */
 588static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 589{
 590	if (likely(vma->vm_flags & VM_WRITE))
 591		pte = pte_mkwrite(pte);
 592	return pte;
 593}
 594
 595void do_set_pte(struct vm_area_struct *vma, unsigned long address,
 596		struct page *page, pte_t *pte, bool write, bool anon);
 597#endif
 598
 599/*
 600 * Multiple processes may "see" the same page. E.g. for untouched
 601 * mappings of /dev/null, all processes see the same page full of
 602 * zeroes, and text pages of executables and shared libraries have
 603 * only one copy in memory, at most, normally.
 604 *
 605 * For the non-reserved pages, page_count(page) denotes a reference count.
 606 *   page_count() == 0 means the page is free. page->lru is then used for
 607 *   freelist management in the buddy allocator.
 608 *   page_count() > 0  means the page has been allocated.
 609 *
 610 * Pages are allocated by the slab allocator in order to provide memory
 611 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 612 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 613 * unless a particular usage is carefully commented. (the responsibility of
 614 * freeing the kmalloc memory is the caller's, of course).
 615 *
 616 * A page may be used by anyone else who does a __get_free_page().
 617 * In this case, page_count still tracks the references, and should only
 618 * be used through the normal accessor functions. The top bits of page->flags
 619 * and page->virtual store page management information, but all other fields
 620 * are unused and could be used privately, carefully. The management of this
 621 * page is the responsibility of the one who allocated it, and those who have
 622 * subsequently been given references to it.
 623 *
 624 * The other pages (we may call them "pagecache pages") are completely
 625 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 626 * The following discussion applies only to them.
 627 *
 628 * A pagecache page contains an opaque `private' member, which belongs to the
 629 * page's address_space. Usually, this is the address of a circular list of
 630 * the page's disk buffers. PG_private must be set to tell the VM to call
 631 * into the filesystem to release these pages.
 632 *
 633 * A page may belong to an inode's memory mapping. In this case, page->mapping
 634 * is the pointer to the inode, and page->index is the file offset of the page,
 635 * in units of PAGE_CACHE_SIZE.
 636 *
 637 * If pagecache pages are not associated with an inode, they are said to be
 638 * anonymous pages. These may become associated with the swapcache, and in that
 639 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 640 *
 641 * In either case (swapcache or inode backed), the pagecache itself holds one
 642 * reference to the page. Setting PG_private should also increment the
 643 * refcount. Each user mapping also has a reference to the page.
 644 *
 645 * The pagecache pages are stored in a per-mapping radix tree, which is
 646 * rooted at mapping->page_tree, and indexed by offset.
 647 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 648 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 649 *
 650 * All pagecache pages may be subject to I/O:
 651 * - inode pages may need to be read from disk,
 652 * - inode pages which have been modified and are MAP_SHARED may need
 653 *   to be written back to the inode on disk,
 654 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 655 *   modified may need to be swapped out to swap space and (later) to be read
 656 *   back into memory.
 657 */
 658
 659/*
 660 * The zone field is never updated after free_area_init_core()
 661 * sets it, so none of the operations on it need to be atomic.
 662 */
 663
 664/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
 665#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 666#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 667#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
 668#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
 669
 670/*
 671 * Define the bit shifts to access each section.  For non-existent
 672 * sections we define the shift as 0; that plus a 0 mask ensures
 673 * the compiler will optimise away reference to them.
 674 */
 675#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 676#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 677#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 678#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
 679
 680/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 681#ifdef NODE_NOT_IN_PAGE_FLAGS
 682#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
 683#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
 684						SECTIONS_PGOFF : ZONES_PGOFF)
 685#else
 686#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
 687#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
 688						NODES_PGOFF : ZONES_PGOFF)
 689#endif
 690
 691#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 692
 693#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 694#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 695#endif
 696
 697#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 698#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 699#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 700#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
 701#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 702
 703static inline enum zone_type page_zonenum(const struct page *page)
 704{
 705	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 706}
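/*
 * Example (sketch only): every per-page attribute packed into page->flags
 * is recovered by a plain shift-and-mask, for instance:
 *
 *	int nid  = (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 *	int zone = (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *
 * which is exactly what page_zonenum() above and page_to_nid() below
 * expand to when the node id is kept in page->flags.
 */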
 707
 708#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 709#define SECTION_IN_PAGE_FLAGS
 710#endif
 711
 712/*
 713 * The identification function is mainly used by the buddy allocator for
 714 * determining if two pages could be buddies. We are not really identifying
 715 * the zone since we could be using the section number id if we do not have
 716 * node id available in page flags.
 717 * We only guarantee that it will return the same value for two combinable
 718 * pages in a zone.
 719 */
 720static inline int page_zone_id(struct page *page)
 721{
 722	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 723}
 724
 725static inline int zone_to_nid(struct zone *zone)
 726{
 727#ifdef CONFIG_NUMA
 728	return zone->node;
 729#else
 730	return 0;
 731#endif
 732}
 733
 734#ifdef NODE_NOT_IN_PAGE_FLAGS
 735extern int page_to_nid(const struct page *page);
 736#else
 737static inline int page_to_nid(const struct page *page)
 738{
 739	return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
 740}
 741#endif
 742
 743#ifdef CONFIG_NUMA_BALANCING
 744static inline int cpu_pid_to_cpupid(int cpu, int pid)
 745{
 746	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
 747}
 748
 749static inline int cpupid_to_pid(int cpupid)
 750{
 751	return cpupid & LAST__PID_MASK;
 752}
 753
 754static inline int cpupid_to_cpu(int cpupid)
 755{
 756	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 757}
 758
 759static inline int cpupid_to_nid(int cpupid)
 760{
 761	return cpu_to_node(cpupid_to_cpu(cpupid));
 762}
 763
 764static inline bool cpupid_pid_unset(int cpupid)
 765{
 766	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
 767}
 768
 769static inline bool cpupid_cpu_unset(int cpupid)
 770{
 771	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
 772}
 773
 774static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 775{
 776	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
 777}
 778
 779#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
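/*
 * Example (sketch only): the last CPU and PID that accessed a page are
 * packed into one int and unpacked again with the helpers above:
 *
 *	int cpupid = cpu_pid_to_cpupid(raw_smp_processor_id(), current->pid);
 *	int cpu    = cpupid_to_cpu(cpupid);
 *	int pid    = cpupid_to_pid(cpupid);
 *	int nid    = cpupid_to_nid(cpupid);
 */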
 780#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 781static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 782{
 783	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 784}
 785
 786static inline int page_cpupid_last(struct page *page)
 787{
 788	return page->_last_cpupid;
 789}
 790static inline void page_cpupid_reset_last(struct page *page)
 791{
 792	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
 793}
 794#else
 795static inline int page_cpupid_last(struct page *page)
 796{
 797	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 798}
 799
 800extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 801
 802static inline void page_cpupid_reset_last(struct page *page)
 803{
 804	int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
 805
 806	page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
 807	page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
 808}
 809#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 810#else /* !CONFIG_NUMA_BALANCING */
 811static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 812{
 813	return page_to_nid(page); /* XXX */
 814}
 815
 816static inline int page_cpupid_last(struct page *page)
 817{
 818	return page_to_nid(page); /* XXX */
 819}
 820
 821static inline int cpupid_to_nid(int cpupid)
 822{
 823	return -1;
 824}
 825
 826static inline int cpupid_to_pid(int cpupid)
 827{
 828	return -1;
 829}
 830
 831static inline int cpupid_to_cpu(int cpupid)
 832{
 833	return -1;
 834}
 835
 836static inline int cpu_pid_to_cpupid(int nid, int pid)
 837{
 838	return -1;
 839}
 840
 841static inline bool cpupid_pid_unset(int cpupid)
 842{
 843	return 1;
 844}
 845
 846static inline void page_cpupid_reset_last(struct page *page)
 847{
 848}
 849
 850static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
 851{
 852	return false;
 853}
 854#endif /* CONFIG_NUMA_BALANCING */
 855
 856static inline struct zone *page_zone(const struct page *page)
 857{
 858	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
 859}
 860
 861#ifdef SECTION_IN_PAGE_FLAGS
 862static inline void set_page_section(struct page *page, unsigned long section)
 863{
 864	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
 865	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
 866}
 867
 868static inline unsigned long page_to_section(const struct page *page)
 869{
 870	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
 871}
 872#endif
 873
 874static inline void set_page_zone(struct page *page, enum zone_type zone)
 875{
 876	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
 877	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
 878}
 879
 880static inline void set_page_node(struct page *page, unsigned long node)
 881{
 882	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
 883	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
 884}
 885
 886static inline void set_page_links(struct page *page, enum zone_type zone,
 887	unsigned long node, unsigned long pfn)
 888{
 889	set_page_zone(page, zone);
 890	set_page_node(page, node);
 891#ifdef SECTION_IN_PAGE_FLAGS
 892	set_page_section(page, pfn_to_section_nr(pfn));
 893#endif
 894}
 895
 896/*
 897 * Some inline functions in vmstat.h depend on page_zone()
 898 */
 899#include <linux/vmstat.h>
 900
 901static __always_inline void *lowmem_page_address(const struct page *page)
 902{
 903	return __va(PFN_PHYS(page_to_pfn(page)));
 904}
 905
 906#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
 907#define HASHED_PAGE_VIRTUAL
 908#endif
 909
 910#if defined(WANT_PAGE_VIRTUAL)
 911static inline void *page_address(const struct page *page)
 912{
 913	return page->virtual;
 914}
 915static inline void set_page_address(struct page *page, void *address)
 916{
 917	page->virtual = address;
 918}
 919#define page_address_init()  do { } while(0)
 920#endif
 921
 922#if defined(HASHED_PAGE_VIRTUAL)
 923void *page_address(const struct page *page);
 924void set_page_address(struct page *page, void *virtual);
 925void page_address_init(void);
 926#endif
 927
 928#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
 929#define page_address(page) lowmem_page_address(page)
 930#define set_page_address(page, address)  do { } while(0)
 931#define page_address_init()  do { } while(0)
 932#endif
 933
 934/*
 935 * On an anonymous page mapped into a user virtual memory area,
 936 * page->mapping points to its anon_vma, not to a struct address_space;
 937 * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
 938 *
 939 * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
 940 * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
 941 * and then page->mapping points, not to an anon_vma, but to a private
 942 * structure which KSM associates with that merged page.  See ksm.h.
 943 *
 944 * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
 945 *
 946 * Please note that, confusingly, "page_mapping" refers to the inode
 947 * address_space which maps the page from disk; whereas "page_mapped"
 948 * refers to user virtual address space into which the page is mapped.
 949 */
 950#define PAGE_MAPPING_ANON	1
 951#define PAGE_MAPPING_KSM	2
 952#define PAGE_MAPPING_FLAGS	(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
 953
 954extern struct address_space *page_mapping(struct page *page);
 955
 956/* Neutral page->mapping pointer to address_space or anon_vma or other */
 957static inline void *page_rmapping(struct page *page)
 958{
 959	return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
 960}
 961
 962extern struct address_space *__page_file_mapping(struct page *);
 963
 964static inline
 965struct address_space *page_file_mapping(struct page *page)
 966{
 967	if (unlikely(PageSwapCache(page)))
 968		return __page_file_mapping(page);
 969
 970	return page->mapping;
 971}
 972
 973static inline int PageAnon(struct page *page)
 974{
 975	return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
 976}
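/*
 * Example (sketch only): code that inspects page->mapping typically
 * branches on these bits before dereferencing anything:
 *
 *	if (PageAnon(page)) {
 *		struct anon_vma *anon_vma = page_rmapping(page);
 *		...
 *	} else {
 *		struct address_space *mapping = page_mapping(page);
 *		...
 *	}
 */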
 977
 978/*
 979 * Return the pagecache index of the passed page.  Regular pagecache pages
 980 * use ->index whereas swapcache pages use ->private
 981 */
 982static inline pgoff_t page_index(struct page *page)
 983{
 984	if (unlikely(PageSwapCache(page)))
 985		return page_private(page);
 986	return page->index;
 987}
 988
 989extern pgoff_t __page_file_index(struct page *page);
 990
 991/*
 992 * Return the file index of the page. Regular pagecache pages use ->index
 993 * whereas swapcache pages use swp_offset(->private)
 994 */
 995static inline pgoff_t page_file_index(struct page *page)
 996{
 997	if (unlikely(PageSwapCache(page)))
 998		return __page_file_index(page);
 999
1000	return page->index;
1001}
1002
1003/*
1004 * Return true if this page is mapped into pagetables.
1005 */
1006static inline int page_mapped(struct page *page)
1007{
1008	return atomic_read(&(page)->_mapcount) >= 0;
1009}
1010
1011/*
1012 * Different kinds of faults, as returned by handle_mm_fault().
1013 * Used to decide whether a process gets delivered SIGBUS or
1014 * just gets major/minor fault counters bumped up.
1015 */
1016
1017#define VM_FAULT_MINOR	0 /* For backwards compat. Remove me quickly. */
1018
1019#define VM_FAULT_OOM	0x0001
1020#define VM_FAULT_SIGBUS	0x0002
1021#define VM_FAULT_MAJOR	0x0004
1022#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
1023#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
1024#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
1025
1026#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
1027#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
1028#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
1029#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
1030
1031#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
1032
1033#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
1034			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
1035
1036/* Encode hstate index for a hwpoisoned large page */
1037#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1038#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
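/*
 * Example (sketch only): architecture fault handlers test the bitmask
 * returned by handle_mm_fault() rather than a plain error code, roughly:
 *
 *	ret = handle_mm_fault(mm, vma, address, flags);
 *	if (ret & VM_FAULT_ERROR) {
 *		if (ret & VM_FAULT_OOM)
 *			goto out_of_memory;
 *		if (ret & (VM_FAULT_SIGBUS | VM_FAULT_HWPOISON))
 *			goto do_sigbus;
 *	}
 *	if (ret & VM_FAULT_MAJOR)
 *		tsk->maj_flt++;
 *	else
 *		tsk->min_flt++;
 */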
1039
1040/*
1041 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1042 */
1043extern void pagefault_out_of_memory(void);
1044
1045#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
1046
1047/*
1048 * Flags passed to show_mem() and show_free_areas() to suppress output in
1049 * various contexts.
1050 */
1051#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
1052
1053extern void show_free_areas(unsigned int flags);
1054extern bool skip_free_areas_node(unsigned int flags, int nid);
1055
1056int shmem_zero_setup(struct vm_area_struct *);
1057#ifdef CONFIG_SHMEM
1058bool shmem_mapping(struct address_space *mapping);
1059#else
1060static inline bool shmem_mapping(struct address_space *mapping)
1061{
1062	return false;
1063}
1064#endif
1065
1066extern int can_do_mlock(void);
1067extern int user_shm_lock(size_t, struct user_struct *);
1068extern void user_shm_unlock(size_t, struct user_struct *);
1069
1070/*
1071 * Parameter block passed down to zap_pte_range in exceptional cases.
1072 */
1073struct zap_details {
1074	struct vm_area_struct *nonlinear_vma;	/* Check page->index if set */
1075	struct address_space *check_mapping;	/* Check page->mapping if set */
1076	pgoff_t	first_index;			/* Lowest page->index to unmap */
1077	pgoff_t last_index;			/* Highest page->index to unmap */
1078};
1079
1080struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1081		pte_t pte);
1082
1083int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1084		unsigned long size);
1085void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1086		unsigned long size, struct zap_details *);
1087void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1088		unsigned long start, unsigned long end);
1089
1090/**
1091 * mm_walk - callbacks for walk_page_range
1092 * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
1093 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1094 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1095 *	       this handler is required to be able to handle
1096 *	       pmd_trans_huge() pmds.  They may simply choose to
1097 *	       split_huge_page() instead of handling it explicitly.
1098 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1099 * @pte_hole: if set, called for each hole at all levels
1100 * @hugetlb_entry: if set, called for each hugetlb entry
1101 *		   *Caution*: The caller must hold mmap_sem() if @hugetlb_entry
1102 * 			      is used.
1103 *
1104 * (see walk_page_range for more details)
1105 */
1106struct mm_walk {
1107	int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
1108			 unsigned long next, struct mm_walk *walk);
1109	int (*pud_entry)(pud_t *pud, unsigned long addr,
1110	                 unsigned long next, struct mm_walk *walk);
1111	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1112			 unsigned long next, struct mm_walk *walk);
1113	int (*pte_entry)(pte_t *pte, unsigned long addr,
1114			 unsigned long next, struct mm_walk *walk);
1115	int (*pte_hole)(unsigned long addr, unsigned long next,
1116			struct mm_walk *walk);
1117	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1118			     unsigned long addr, unsigned long next,
1119			     struct mm_walk *walk);
1120	struct mm_struct *mm;
1121	void *private;
1122};
1123
1124int walk_page_range(unsigned long addr, unsigned long end,
1125		struct mm_walk *walk);
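/*
 * Example (sketch only): a caller fills in only the callbacks it needs.
 * "count_present" and "nr" are hypothetical; mmap_sem must be held.
 *
 *	static int count_present(pte_t *pte, unsigned long addr,
 *				 unsigned long next, struct mm_walk *walk)
 *	{
 *		if (pte_present(*pte))
 *			(*(unsigned long *)walk->private)++;
 *		return 0;
 *	}
 *
 *	struct mm_walk walk = {
 *		.pte_entry	= count_present,
 *		.mm		= mm,
 *		.private	= &nr,
 *	};
 *	walk_page_range(vma->vm_start, vma->vm_end, &walk);
 */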
1126void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1127		unsigned long end, unsigned long floor, unsigned long ceiling);
1128int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1129			struct vm_area_struct *vma);
1130void unmap_mapping_range(struct address_space *mapping,
1131		loff_t const holebegin, loff_t const holelen, int even_cows);
1132int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1133	unsigned long *pfn);
1134int follow_phys(struct vm_area_struct *vma, unsigned long address,
1135		unsigned int flags, unsigned long *prot, resource_size_t *phys);
1136int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1137			void *buf, int len, int write);
1138
1139static inline void unmap_shared_mapping_range(struct address_space *mapping,
1140		loff_t const holebegin, loff_t const holelen)
1141{
1142	unmap_mapping_range(mapping, holebegin, holelen, 0);
1143}
1144
1145extern void truncate_pagecache(struct inode *inode, loff_t new);
1146extern void truncate_setsize(struct inode *inode, loff_t newsize);
1147void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1148int truncate_inode_page(struct address_space *mapping, struct page *page);
1149int generic_error_remove_page(struct address_space *mapping, struct page *page);
1150int invalidate_inode_page(struct page *page);
1151
1152#ifdef CONFIG_MMU
1153extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
1154			unsigned long address, unsigned int flags);
1155extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1156			    unsigned long address, unsigned int fault_flags);
1157#else
1158static inline int handle_mm_fault(struct mm_struct *mm,
1159			struct vm_area_struct *vma, unsigned long address,
1160			unsigned int flags)
1161{
1162	/* should never happen if there's no MMU */
1163	BUG();
1164	return VM_FAULT_SIGBUS;
1165}
1166static inline int fixup_user_fault(struct task_struct *tsk,
1167		struct mm_struct *mm, unsigned long address,
1168		unsigned int fault_flags)
1169{
1170	/* should never happen if there's no MMU */
1171	BUG();
1172	return -EFAULT;
1173}
1174#endif
1175
1176extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
1177extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1178		void *buf, int len, int write);
1179
1180long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1181		      unsigned long start, unsigned long nr_pages,
1182		      unsigned int foll_flags, struct page **pages,
1183		      struct vm_area_struct **vmas, int *nonblocking);
1184long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
1185		    unsigned long start, unsigned long nr_pages,
1186		    int write, int force, struct page **pages,
1187		    struct vm_area_struct **vmas);
1188int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1189			struct page **pages);
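/*
 * Example (sketch only): pinning a user buffer for kernel I/O with the
 * fast path and releasing the pages afterwards; "uaddr", "nr", "i" and
 * "pages" are hypothetical.  The slower get_user_pages() variant
 * additionally takes the task, the mm and an optional vmas array, and
 * must be called with mmap_sem held.
 *
 *	got = get_user_pages_fast(uaddr, nr, 1, pages);
 *	if (got > 0) {
 *		...
 *		for (i = 0; i < got; i++)
 *			put_page(pages[i]);
 *	}
 */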
1190struct kvec;
1191int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1192			struct page **pages);
1193int get_kernel_page(unsigned long start, int write, struct page **pages);
1194struct page *get_dump_page(unsigned long addr);
1195
1196extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1197extern void do_invalidatepage(struct page *page, unsigned int offset,
1198			      unsigned int length);
1199
1200int __set_page_dirty_nobuffers(struct page *page);
1201int __set_page_dirty_no_writeback(struct page *page);
1202int redirty_page_for_writepage(struct writeback_control *wbc,
1203				struct page *page);
1204void account_page_dirtied(struct page *page, struct address_space *mapping);
1205void account_page_writeback(struct page *page);
1206int set_page_dirty(struct page *page);
1207int set_page_dirty_lock(struct page *page);
1208int clear_page_dirty_for_io(struct page *page);
1209int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1210
1211/* Is the vma a continuation of the stack vma above it? */
1212static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
1213{
1214	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
1215}
1216
1217static inline int stack_guard_page_start(struct vm_area_struct *vma,
1218					     unsigned long addr)
1219{
1220	return (vma->vm_flags & VM_GROWSDOWN) &&
1221		(vma->vm_start == addr) &&
1222		!vma_growsdown(vma->vm_prev, addr);
1223}
1224
1225/* Is the vma a continuation of the stack vma below it? */
1226static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
1227{
1228	return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
1229}
1230
1231static inline int stack_guard_page_end(struct vm_area_struct *vma,
1232					   unsigned long addr)
1233{
1234	return (vma->vm_flags & VM_GROWSUP) &&
1235		(vma->vm_end == addr) &&
1236		!vma_growsup(vma->vm_next, addr);
1237}
1238
1239extern pid_t
1240vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
1241
1242extern unsigned long move_page_tables(struct vm_area_struct *vma,
1243		unsigned long old_addr, struct vm_area_struct *new_vma,
1244		unsigned long new_addr, unsigned long len,
1245		bool need_rmap_locks);
1246extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1247			      unsigned long end, pgprot_t newprot,
1248			      int dirty_accountable, int prot_numa);
1249extern int mprotect_fixup(struct vm_area_struct *vma,
1250			  struct vm_area_struct **pprev, unsigned long start,
1251			  unsigned long end, unsigned long newflags);
1252
1253/*
1254 * Does not attempt to fault pages in, so it may return fewer pages than requested.
1255 */
1256int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1257			  struct page **pages);
1258/*
1259 * per-process(per-mm_struct) statistics.
1260 */
1261static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1262{
1263	long val = atomic_long_read(&mm->rss_stat.count[member]);
1264
1265#ifdef SPLIT_RSS_COUNTING
1266	/*
1267	 * The counter is updated asynchronously and may temporarily go negative,
1268	 * but a negative value is never a meaningful number for users.
1269	 */
1270	if (val < 0)
1271		val = 0;
1272#endif
1273	return (unsigned long)val;
1274}
1275
1276static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1277{
1278	atomic_long_add(value, &mm->rss_stat.count[member]);
1279}
1280
1281static inline void inc_mm_counter(struct mm_struct *mm, int member)
1282{
1283	atomic_long_inc(&mm->rss_stat.count[member]);
1284}
1285
1286static inline void dec_mm_counter(struct mm_struct *mm, int member)
1287{
1288	atomic_long_dec(&mm->rss_stat.count[member]);
1289}
1290
1291static inline unsigned long get_mm_rss(struct mm_struct *mm)
1292{
1293	return get_mm_counter(mm, MM_FILEPAGES) +
1294		get_mm_counter(mm, MM_ANONPAGES);
1295}
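/*
 * Example (sketch only): fault and unmap paths bump these counters as
 * pages come and go, and the RSS reported to userspace is derived from
 * them, conceptually:
 *
 *	inc_mm_counter(mm, MM_ANONPAGES);
 *	...
 *	dec_mm_counter(mm, MM_FILEPAGES);
 *	...
 *	rss = get_mm_rss(mm);
 */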
1296
1297static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1298{
1299	return max(mm->hiwater_rss, get_mm_rss(mm));
1300}
1301
1302static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1303{
1304	return max(mm->hiwater_vm, mm->total_vm);
1305}
1306
1307static inline void update_hiwater_rss(struct mm_struct *mm)
1308{
1309	unsigned long _rss = get_mm_rss(mm);
1310
1311	if ((mm)->hiwater_rss < _rss)
1312		(mm)->hiwater_rss = _rss;
1313}
1314
1315static inline void update_hiwater_vm(struct mm_struct *mm)
1316{
1317	if (mm->hiwater_vm < mm->total_vm)
1318		mm->hiwater_vm = mm->total_vm;
1319}
1320
1321static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1322					 struct mm_struct *mm)
1323{
1324	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1325
1326	if (*maxrss < hiwater_rss)
1327		*maxrss = hiwater_rss;
1328}
1329
1330#if defined(SPLIT_RSS_COUNTING)
1331void sync_mm_rss(struct mm_struct *mm);
1332#else
1333static inline void sync_mm_rss(struct mm_struct *mm)
1334{
1335}
1336#endif
1337
1338int vma_wants_writenotify(struct vm_area_struct *vma);
1339
1340extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1341			       spinlock_t **ptl);
1342static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1343				    spinlock_t **ptl)
1344{
1345	pte_t *ptep;
1346	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1347	return ptep;
1348}
1349
1350#ifdef __PAGETABLE_PUD_FOLDED
1351static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
1352						unsigned long address)
1353{
1354	return 0;
1355}
1356#else
1357int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1358#endif
1359
1360#ifdef __PAGETABLE_PMD_FOLDED
1361static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1362						unsigned long address)
1363{
1364	return 0;
1365}
1366#else
1367int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1368#endif
1369
1370int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
1371		pmd_t *pmd, unsigned long address);
1372int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1373
1374/*
1375 * The following ifdef needed to get the 4level-fixup.h header to work.
1376 * Remove it when 4level-fixup.h has been removed.
1377 */
1378#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1379static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
1380{
1381	return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
1382		NULL: pud_offset(pgd, address);
1383}
1384
1385static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1386{
1387	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1388		NULL: pmd_offset(pud, address);
1389}
1390#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1391
1392#if USE_SPLIT_PTE_PTLOCKS
1393#if ALLOC_SPLIT_PTLOCKS
1394void __init ptlock_cache_init(void);
1395extern bool ptlock_alloc(struct page *page);
1396extern void ptlock_free(struct page *page);
1397
1398static inline spinlock_t *ptlock_ptr(struct page *page)
1399{
1400	return page->ptl;
1401}
1402#else /* ALLOC_SPLIT_PTLOCKS */
1403static inline void ptlock_cache_init(void)
1404{
1405}
1406
1407static inline bool ptlock_alloc(struct page *page)
1408{
1409	return true;
1410}
1411
1412static inline void ptlock_free(struct page *page)
1413{
1414}
1415
1416static inline spinlock_t *ptlock_ptr(struct page *page)
1417{
1418	return &page->ptl;
1419}
1420#endif /* ALLOC_SPLIT_PTLOCKS */
1421
1422static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1423{
1424	return ptlock_ptr(pmd_page(*pmd));
1425}
1426
1427static inline bool ptlock_init(struct page *page)
1428{
1429	/*
1430	 * prep_new_page() initializes page->private (and therefore page->ptl)
1431	 * with 0. Make sure nobody has taken it into use in the meantime.
1432	 *
1433	 * That can happen if an arch tries to use slab for page table allocation:
1434	 * slab code uses page->slab_cache and page->first_page (for tail
1435	 * pages), which share storage with page->ptl.
1436	 */
1437	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1438	if (!ptlock_alloc(page))
1439		return false;
1440	spin_lock_init(ptlock_ptr(page));
1441	return true;
1442}
1443
1444/* Reset page->mapping so free_pages_check won't complain. */
1445static inline void pte_lock_deinit(struct page *page)
1446{
1447	page->mapping = NULL;
1448	ptlock_free(page);
1449}
1450
1451#else	/* !USE_SPLIT_PTE_PTLOCKS */
1452/*
1453 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1454 */
1455static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1456{
1457	return &mm->page_table_lock;
1458}
1459static inline void ptlock_cache_init(void) {}
1460static inline bool ptlock_init(struct page *page) { return true; }
1461static inline void pte_lock_deinit(struct page *page) {}
1462#endif /* USE_SPLIT_PTE_PTLOCKS */
1463
1464static inline void pgtable_init(void)
1465{
1466	ptlock_cache_init();
1467	pgtable_cache_init();
1468}
1469
1470static inline bool pgtable_page_ctor(struct page *page)
1471{
1472	inc_zone_page_state(page, NR_PAGETABLE);
1473	return ptlock_init(page);
1474}
1475
1476static inline void pgtable_page_dtor(struct page *page)
1477{
1478	pte_lock_deinit(page);
1479	dec_zone_page_state(page, NR_PAGETABLE);
1480}
1481
1482#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
1483({							\
1484	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
1485	pte_t *__pte = pte_offset_map(pmd, address);	\
1486	*(ptlp) = __ptl;				\
1487	spin_lock(__ptl);				\
1488	__pte;						\
1489})
1490
1491#define pte_unmap_unlock(pte, ptl)	do {		\
1492	spin_unlock(ptl);				\
1493	pte_unmap(pte);					\
1494} while (0)
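/*
 * Example (sketch only): the canonical way to examine a single pte with
 * the correct lock held, whatever the split-ptlock configuration:
 *
 *	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
 *	if (pte_present(*pte))
 *		page = vm_normal_page(vma, address, *pte);
 *	pte_unmap_unlock(pte, ptl);
 */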
1495
1496#define pte_alloc_map(mm, vma, pmd, address)				\
1497	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,	\
1498							pmd, address))?	\
1499	 NULL: pte_offset_map(pmd, address))
1500
1501#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
1502	((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,	\
1503							pmd, address))?	\
1504		NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
1505
1506#define pte_alloc_kernel(pmd, address)			\
1507	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1508		NULL: pte_offset_kernel(pmd, address))
1509
1510#if USE_SPLIT_PMD_PTLOCKS
1511
1512static struct page *pmd_to_page(pmd_t *pmd)
1513{
1514	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1515	return virt_to_page((void *)((unsigned long) pmd & mask));
1516}
1517
1518static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1519{
1520	return ptlock_ptr(pmd_to_page(pmd));
1521}
1522
1523static inline bool pgtable_pmd_page_ctor(struct page *page)
1524{
1525#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1526	page->pmd_huge_pte = NULL;
1527#endif
1528	return ptlock_init(page);
1529}
1530
1531static inline void pgtable_pmd_page_dtor(struct page *page)
1532{
1533#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1534	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1535#endif
1536	ptlock_free(page);
1537}
1538
1539#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1540
1541#else
1542
1543static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1544{
1545	return &mm->page_table_lock;
1546}
1547
1548static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1549static inline void pgtable_pmd_page_dtor(struct page *page) {}
1550
1551#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1552
1553#endif
1554
1555static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1556{
1557	spinlock_t *ptl = pmd_lockptr(mm, pmd);
1558	spin_lock(ptl);
1559	return ptl;
1560}
1561
1562extern void free_area_init(unsigned long * zones_size);
1563extern void free_area_init_node(int nid, unsigned long * zones_size,
1564		unsigned long zone_start_pfn, unsigned long *zholes_size);
1565extern void free_initmem(void);
1566
1567/*
1568 * Free reserved pages within the range [PAGE_ALIGN(start), end & PAGE_MASK)
1569 * into the buddy system. The freed pages will be poisoned with the pattern
1570 * "poison" if poison is within the range [0, UCHAR_MAX].
1571 * Returns the number of pages freed into the buddy system.
1572 */
1573extern unsigned long free_reserved_area(void *start, void *end,
1574					int poison, char *s);
1575
1576#ifdef	CONFIG_HIGHMEM
1577/*
1578 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1579 * and totalram_pages.
1580 */
1581extern void free_highmem_page(struct page *page);
1582#endif
1583
1584extern void adjust_managed_page_count(struct page *page, long count);
1585extern void mem_init_print_info(const char *str);
1586
1587/* Free the reserved page into the buddy system, so it gets managed. */
1588static inline void __free_reserved_page(struct page *page)
1589{
1590	ClearPageReserved(page);
1591	init_page_count(page);
1592	__free_page(page);
1593}
1594
1595static inline void free_reserved_page(struct page *page)
1596{
1597	__free_reserved_page(page);
1598	adjust_managed_page_count(page, 1);
1599}
1600
1601static inline void mark_page_reserved(struct page *page)
1602{
1603	SetPageReserved(page);
1604	adjust_managed_page_count(page, -1);
1605}
1606
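/*
 * Illustrative sketch (not part of this header): an architecture that kept a
 * boot-time scratch region reserved can hand it back to the buddy allocator
 * with free_reserved_area(). The __scratch_begin/__scratch_end symbols are
 * invented for the example; a poison value of -1 means "do not poison".
 */
#if 0	/* example only */
static void __init release_boot_scratch(void)
{
	extern char __scratch_begin[], __scratch_end[];

	free_reserved_area(__scratch_begin, __scratch_end, -1, "boot scratch");
}
#endif
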
1607/*
1608 * Default method to free all the __init memory into the buddy system.
1609 * The freed pages will be poisoned with the pattern "poison" if it is
1610 * within the range [0, UCHAR_MAX].
1611 * Returns the number of pages freed into the buddy system.
1612 */
1613static inline unsigned long free_initmem_default(int poison)
1614{
1615	extern char __init_begin[], __init_end[];
1616
1617	return free_reserved_area(&__init_begin, &__init_end,
1618				  poison, "unused kernel");
1619}
1620
1621static inline unsigned long get_num_physpages(void)
1622{
1623	int nid;
1624	unsigned long phys_pages = 0;
1625
1626	for_each_online_node(nid)
1627		phys_pages += node_present_pages(nid);
1628
1629	return phys_pages;
1630}
1631
1632#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
1633/*
1634 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
1635 * zones, allocate the backing mem_map and account for memory holes in a more
1636 * architecture independent manner. This is a substitute for creating the
1637 * zone_sizes[] and zholes_size[] arrays and passing them to
1638 * free_area_init_node()
1639 *
1640 * An architecture is expected to register the ranges of page frames backed
1641 * by physical memory with memblock_add[_node]() before calling
1642 * free_area_init_nodes(), passing in the PFN each zone ends at. In basic
1643 * usage, an architecture is expected to do something like
1644 *
1645 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
1646 * 							 max_highmem_pfn};
1647 * for_each_valid_physical_page_range()
1648 * 	memblock_add_node(base, size, nid)
1649 * free_area_init_nodes(max_zone_pfns);
1650 *
1651 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
1652 * registered physical page range.  Similarly
1653 * sparse_memory_present_with_active_regions() calls memory_present() for
1654 * each range when SPARSEMEM is enabled.
1655 *
1656 * See mm/page_alloc.c for more information on each function exposed by
1657 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
1658 */
1659extern void free_area_init_nodes(unsigned long *max_zone_pfn);
1660unsigned long node_map_pfn_alignment(void);
1661unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
1662						unsigned long end_pfn);
1663extern unsigned long absent_pages_in_range(unsigned long start_pfn,
1664						unsigned long end_pfn);
1665extern void get_pfn_range_for_nid(unsigned int nid,
1666			unsigned long *start_pfn, unsigned long *end_pfn);
1667extern unsigned long find_min_pfn_with_active_regions(void);
1668extern void free_bootmem_with_active_regions(int nid,
1669						unsigned long max_low_pfn);
1670extern void sparse_memory_present_with_active_regions(int nid);
1671
1672#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
1673
1674#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
1675    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
1676static inline int __early_pfn_to_nid(unsigned long pfn)
1677{
1678	return 0;
1679}
1680#else
1681/* please see mm/page_alloc.c */
1682extern int __meminit early_pfn_to_nid(unsigned long pfn);
1683/* there is a per-arch backend function. */
1684extern int __meminit __early_pfn_to_nid(unsigned long pfn);
1685#endif
1686
1687extern void set_dma_reserve(unsigned long new_dma_reserve);
1688extern void memmap_init_zone(unsigned long, int, unsigned long,
1689				unsigned long, enum memmap_context);
1690extern void setup_per_zone_wmarks(void);
1691extern int __meminit init_per_zone_wmark_min(void);
1692extern void mem_init(void);
1693extern void __init mmap_init(void);
1694extern void show_mem(unsigned int flags);
 
1695extern void si_meminfo(struct sysinfo * val);
1696extern void si_meminfo_node(struct sysinfo *val, int nid);
1697
1698extern __printf(3, 4)
1699void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
1700
1701extern void setup_per_cpu_pageset(void);
1702
1703extern void zone_pcp_update(struct zone *zone);
1704extern void zone_pcp_reset(struct zone *zone);
1705
1706/* page_alloc.c */
1707extern int min_free_kbytes;
 
1708
1709/* nommu.c */
1710extern atomic_long_t mmap_pages_allocated;
1711extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
1712
1713/* interval_tree.c */
1714void vma_interval_tree_insert(struct vm_area_struct *node,
1715			      struct rb_root *root);
1716void vma_interval_tree_insert_after(struct vm_area_struct *node,
1717				    struct vm_area_struct *prev,
1718				    struct rb_root *root);
1719void vma_interval_tree_remove(struct vm_area_struct *node,
1720			      struct rb_root *root);
1721struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
1722				unsigned long start, unsigned long last);
1723struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
1724				unsigned long start, unsigned long last);
1725
1726#define vma_interval_tree_foreach(vma, root, start, last)		\
1727	for (vma = vma_interval_tree_iter_first(root, start, last);	\
1728	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
1729
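/*
 * Illustrative sketch (not part of this header): walking every VMA that maps
 * a given page range of a file via the mapping's interval tree. The caller
 * is assumed to hold the lock protecting mapping->i_mmap; count_mappers()
 * is an invented name.
 */
#if 0	/* example only */
static int count_mappers(struct address_space *mapping,
			 pgoff_t first, pgoff_t last)
{
	struct vm_area_struct *vma;
	int count = 0;

	vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
		count++;
	return count;
}
#endif
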
1730static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
1731					struct list_head *list)
1732{
1733	list_add_tail(&vma->shared.nonlinear, list);
1734}
1735
1736void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
1737				   struct rb_root *root);
1738void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
1739				   struct rb_root *root);
1740struct anon_vma_chain *anon_vma_interval_tree_iter_first(
1741	struct rb_root *root, unsigned long start, unsigned long last);
 
1742struct anon_vma_chain *anon_vma_interval_tree_iter_next(
1743	struct anon_vma_chain *node, unsigned long start, unsigned long last);
1744#ifdef CONFIG_DEBUG_VM_RB
1745void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
1746#endif
1747
1748#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
1749	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
1750	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
1751
1752/* mmap.c */
1753extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
1754extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
1755	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
1756extern struct vm_area_struct *vma_merge(struct mm_struct *,
1757	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
1758	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
1759	struct mempolicy *);
1760extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
1761extern int split_vma(struct mm_struct *,
1762	struct vm_area_struct *, unsigned long addr, int new_below);
1763extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
1764extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
1765	struct rb_node **, struct rb_node *);
1766extern void unlink_file_vma(struct vm_area_struct *);
1767extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
1768	unsigned long addr, unsigned long len, pgoff_t pgoff,
1769	bool *need_rmap_locks);
1770extern void exit_mmap(struct mm_struct *);
1771
1772extern int mm_take_all_locks(struct mm_struct *mm);
1773extern void mm_drop_all_locks(struct mm_struct *mm);
1774
1775extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
1776extern struct file *get_mm_exe_file(struct mm_struct *mm);
1777
1778extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
 
1779extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
1780				   unsigned long addr, unsigned long len,
1781				   unsigned long flags, struct page **pages);
1782extern int install_special_mapping(struct mm_struct *mm,
1783				   unsigned long addr, unsigned long len,
1784				   unsigned long flags, struct page **pages);
1785
1786extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
1787
1788extern unsigned long mmap_region(struct file *file, unsigned long addr,
1789	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
1790extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1791	unsigned long len, unsigned long prot, unsigned long flags,
1792	unsigned long pgoff, unsigned long *populate);
1793extern int do_munmap(struct mm_struct *, unsigned long, size_t);
1794
1795#ifdef CONFIG_MMU
1796extern int __mm_populate(unsigned long addr, unsigned long len,
1797			 int ignore_errors);
1798static inline void mm_populate(unsigned long addr, unsigned long len)
1799{
1800	/* Ignore errors */
1801	(void) __mm_populate(addr, len, 1);
1802}
1803#else
1804static inline void mm_populate(unsigned long addr, unsigned long len) {}
1805#endif
1806
1807/* These take the mm semaphore themselves */
1808extern unsigned long vm_brk(unsigned long, unsigned long);
 
1809extern int vm_munmap(unsigned long, size_t);
1810extern unsigned long vm_mmap(struct file *, unsigned long,
1811        unsigned long, unsigned long,
1812        unsigned long, unsigned long);
1813
1814struct vm_unmapped_area_info {
1815#define VM_UNMAPPED_AREA_TOPDOWN 1
1816	unsigned long flags;
1817	unsigned long length;
1818	unsigned long low_limit;
1819	unsigned long high_limit;
1820	unsigned long align_mask;
1821	unsigned long align_offset;
1822};
1823
1824extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
1825extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
1826
1827/*
1828 * Search for an unmapped address range.
1829 *
1830 * We are looking for a range that:
1831 * - does not intersect with any VMA;
1832 * - is contained within the [low_limit, high_limit) interval;
1833 * - is at least the desired size;
1834 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
1835 */
1836static inline unsigned long
1837vm_unmapped_area(struct vm_unmapped_area_info *info)
1838{
1839	if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
1840		return unmapped_area(info);
1841	else
1842		return unmapped_area_topdown(info);
1843}
1844
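/*
 * Illustrative sketch (not part of this header): a simplified
 * arch_get_unmapped_area()-style helper that asks vm_unmapped_area() for a
 * bottom-up gap of "len" bytes. The limits chosen here are only an example,
 * not what any particular architecture uses.
 */
#if 0	/* example only */
static unsigned long find_low_gap(unsigned long len)
{
	struct vm_unmapped_area_info info;

	info.flags = 0;			/* bottom-up search */
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = TASK_SIZE;
	info.align_mask = 0;		/* no extra alignment constraint */
	info.align_offset = 0;
	return vm_unmapped_area(&info);	/* address, or -ENOMEM on failure */
}
#endif
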
1845/* truncate.c */
1846extern void truncate_inode_pages(struct address_space *, loff_t);
1847extern void truncate_inode_pages_range(struct address_space *,
1848				       loff_t lstart, loff_t lend);
1849extern void truncate_inode_pages_final(struct address_space *);
1850
1851/* generic vm_area_ops exported for stackable file systems */
1852extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
1853extern void filemap_map_pages(struct vm_area_struct *vma, struct vm_fault *vmf);
1854extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
 
1855
1856/* mm/page-writeback.c */
1857int write_one_page(struct page *page, int wait);
1858void task_dirty_inc(struct task_struct *tsk);
1859
1860/* readahead.c */
1861#define VM_MAX_READAHEAD	128	/* kbytes */
1862#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
1863
1864int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
1865			pgoff_t offset, unsigned long nr_to_read);
1866
1867void page_cache_sync_readahead(struct address_space *mapping,
1868			       struct file_ra_state *ra,
1869			       struct file *filp,
1870			       pgoff_t offset,
1871			       unsigned long size);
1872
1873void page_cache_async_readahead(struct address_space *mapping,
1874				struct file_ra_state *ra,
1875				struct file *filp,
1876				struct page *pg,
1877				pgoff_t offset,
1878				unsigned long size);
1879
1880unsigned long max_sane_readahead(unsigned long nr);
1881
1882/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
1883extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
1884
1885	/* CONFIG_STACK_GROWSUP still needs to grow downwards at some places */
1886extern int expand_downwards(struct vm_area_struct *vma,
1887		unsigned long address);
1888#if VM_GROWSUP
1889extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
1890#else
1891  #define expand_upwards(vma, address) do { } while (0)
1892#endif
1893
1894/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
1895extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
1896extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
1897					     struct vm_area_struct **pprev);
1898
1899/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
1900   NULL if none.  Assume start_addr < end_addr. */
1901static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
1902{
1903	struct vm_area_struct * vma = find_vma(mm,start_addr);
1904
1905	if (vma && end_addr <= vma->vm_start)
1906		vma = NULL;
1907	return vma;
1908}
1909
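/*
 * Illustrative sketch (not part of this header): find_vma_intersection() is
 * a thin wrapper around find_vma() that also rejects VMAs starting at or
 * above end_addr; like find_vma() it must be called with mmap_sem held.
 * range_is_mapped() is an invented name.
 */
#if 0	/* example only */
static bool range_is_mapped(struct mm_struct *mm,
			    unsigned long start, unsigned long end)
{
	bool ret;

	down_read(&mm->mmap_sem);
	ret = find_vma_intersection(mm, start, end) != NULL;
	up_read(&mm->mmap_sem);
	return ret;
}
#endif
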
1910static inline unsigned long vma_pages(struct vm_area_struct *vma)
1911{
1912	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
1913}
1914
1915/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
1916static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
1917				unsigned long vm_start, unsigned long vm_end)
1918{
1919	struct vm_area_struct *vma = find_vma(mm, vm_start);
1920
1921	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
1922		vma = NULL;
1923
1924	return vma;
1925}
1926
1927#ifdef CONFIG_MMU
1928pgprot_t vm_get_page_prot(unsigned long vm_flags);
 
1929#else
1930static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
1931{
1932	return __pgprot(0);
1933}
1934#endif
1935
1936#ifdef CONFIG_NUMA_BALANCING
1937unsigned long change_prot_numa(struct vm_area_struct *vma,
1938			unsigned long start, unsigned long end);
1939#endif
1940
1941struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
1942int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
1943			unsigned long pfn, unsigned long size, pgprot_t);
1944int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
1945int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
1946			unsigned long pfn);
1947int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
1948			unsigned long pfn);
1949int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
1950
1951
1952struct page *follow_page_mask(struct vm_area_struct *vma,
1953			      unsigned long address, unsigned int foll_flags,
1954			      unsigned int *page_mask);
1955
1956static inline struct page *follow_page(struct vm_area_struct *vma,
1957		unsigned long address, unsigned int foll_flags)
1958{
1959	unsigned int unused_page_mask;
1960	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
1961}
1962
1963#define FOLL_WRITE	0x01	/* check pte is writable */
1964#define FOLL_TOUCH	0x02	/* mark page accessed */
1965#define FOLL_GET	0x04	/* do get_page on page */
1966#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
1967#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
1968#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
1969				 * and return without waiting upon it */
1970#define FOLL_MLOCK	0x40	/* mark page as mlocked */
1971#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
1972#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
1973#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
1974#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
1975
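/*
 * Illustrative sketch (not part of this header): probing whether a user
 * address is currently backed by a page. FOLL_GET takes a reference that
 * the caller must drop; mmap_sem must be held across the call. The helper
 * name is invented.
 */
#if 0	/* example only */
static bool user_addr_is_populated(struct vm_area_struct *vma,
				   unsigned long addr)
{
	struct page *page = follow_page(vma, addr, FOLL_GET);

	if (IS_ERR_OR_NULL(page))
		return false;
	put_page(page);			/* drop the FOLL_GET reference */
	return true;
}
#endif
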
1976typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
1977			void *data);
1978extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
1979			       unsigned long size, pte_fn_t fn, void *data);
1980
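/*
 * Illustrative sketch (not part of this header): apply_to_page_range() walks
 * (allocating page tables as needed) a kernel address range and calls the
 * pte_fn_t callback on each pte. The callback and the use of "data" below
 * are invented for the example.
 */
#if 0	/* example only */
static int count_present_pte(pte_t *pte, pgtable_t token,
			     unsigned long addr, void *data)
{
	if (pte_present(*pte))
		(*(unsigned long *)data)++;
	return 0;		/* a non-zero return aborts the walk */
}

static unsigned long count_present(unsigned long start, unsigned long size)
{
	unsigned long present = 0;

	apply_to_page_range(&init_mm, start, size, count_present_pte, &present);
	return present;
}
#endif
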
1981#ifdef CONFIG_PROC_FS
1982void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
1983#else
1984static inline void vm_stat_account(struct mm_struct *mm,
1985			unsigned long flags, struct file *file, long pages)
1986{
1987	mm->total_vm += pages;
1988}
1989#endif /* CONFIG_PROC_FS */
1990
1991#ifdef CONFIG_DEBUG_PAGEALLOC
1992extern void kernel_map_pages(struct page *page, int numpages, int enable);
1993#ifdef CONFIG_HIBERNATION
1994extern bool kernel_page_present(struct page *page);
1995#endif /* CONFIG_HIBERNATION */
1996#else
1997static inline void
1998kernel_map_pages(struct page *page, int numpages, int enable) {}
1999#ifdef CONFIG_HIBERNATION
2000static inline bool kernel_page_present(struct page *page) { return true; }
2001#endif /* CONFIG_HIBERNATION */
2002#endif
2003
 
2004extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2005#ifdef	__HAVE_ARCH_GATE_AREA
2006int in_gate_area_no_mm(unsigned long addr);
2007int in_gate_area(struct mm_struct *mm, unsigned long addr);
2008#else
2009int in_gate_area_no_mm(unsigned long addr);
2010#define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
2011#endif	/* __HAVE_ARCH_GATE_AREA */
2012
2013#ifdef CONFIG_SYSCTL
2014extern int sysctl_drop_caches;
2015int drop_caches_sysctl_handler(struct ctl_table *, int,
2016					void __user *, size_t *, loff_t *);
2017#endif
2018
2019unsigned long shrink_slab(struct shrink_control *shrink,
2020			  unsigned long nr_pages_scanned,
2021			  unsigned long lru_pages);
2022
2023#ifndef CONFIG_MMU
2024#define randomize_va_space 0
2025#else
2026extern int randomize_va_space;
2027#endif
2028
2029const char * arch_vma_name(struct vm_area_struct *vma);
2030void print_vma_addr(char *prefix, unsigned long rip);
2031
2032void sparse_mem_maps_populate_node(struct page **map_map,
2033				   unsigned long pnum_begin,
2034				   unsigned long pnum_end,
2035				   unsigned long map_count,
2036				   int nodeid);
2037
2038struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
 
2039pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2040pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
 
2041pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2042pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2043void *vmemmap_alloc_block(unsigned long size, int node);
 
2044void *vmemmap_alloc_block_buf(unsigned long size, int node);
 
2045void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2046int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2047			       int node);
2048int vmemmap_populate(unsigned long start, unsigned long end, int node);
 
2049void vmemmap_populate_print_last(void);
2050#ifdef CONFIG_MEMORY_HOTPLUG
2051void vmemmap_free(unsigned long start, unsigned long end);
 
2052#endif
2053void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2054				  unsigned long size);
2055
2056enum mf_flags {
2057	MF_COUNT_INCREASED = 1 << 0,
2058	MF_ACTION_REQUIRED = 1 << 1,
2059	MF_MUST_KILL = 1 << 2,
2060	MF_SOFT_OFFLINE = 1 << 3,
2061};
2062extern int memory_failure(unsigned long pfn, int trapno, int flags);
2063extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
2064extern int unpoison_memory(unsigned long pfn);
2065extern int sysctl_memory_failure_early_kill;
2066extern int sysctl_memory_failure_recovery;
2067extern void shake_page(struct page *p, int access);
2068extern atomic_long_t num_poisoned_pages;
2069extern int soft_offline_page(struct page *page, int flags);
2070
2071#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2072extern void clear_huge_page(struct page *page,
2073			    unsigned long addr,
2074			    unsigned int pages_per_huge_page);
2075extern void copy_user_huge_page(struct page *dst, struct page *src,
2076				unsigned long addr, struct vm_area_struct *vma,
2077				unsigned int pages_per_huge_page);
2078#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
2079
2080#ifdef CONFIG_DEBUG_PAGEALLOC
2081extern unsigned int _debug_guardpage_minorder;
 
2082
2083static inline unsigned int debug_guardpage_minorder(void)
2084{
2085	return _debug_guardpage_minorder;
2086}
2087
2088static inline bool page_is_guard(struct page *page)
2089{
2090	return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
2091}
2092#else
2093static inline unsigned int debug_guardpage_minorder(void) { return 0; }
 
2094static inline bool page_is_guard(struct page *page) { return false; }
2095#endif /* CONFIG_DEBUG_PAGEALLOC */
2096
2097#if MAX_NUMNODES > 1
2098void __init setup_nr_node_ids(void);
2099#else
2100static inline void setup_nr_node_ids(void) {}
2101#endif
2102
2103#endif /* __KERNEL__ */
2104#endif /* _LINUX_MM_H */
v4.17
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_MM_H
   3#define _LINUX_MM_H
   4
   5#include <linux/errno.h>
   6
   7#ifdef __KERNEL__
   8
   9#include <linux/mmdebug.h>
  10#include <linux/gfp.h>
  11#include <linux/bug.h>
  12#include <linux/list.h>
  13#include <linux/mmzone.h>
  14#include <linux/rbtree.h>
  15#include <linux/atomic.h>
  16#include <linux/debug_locks.h>
  17#include <linux/mm_types.h>
  18#include <linux/range.h>
  19#include <linux/pfn.h>
  20#include <linux/percpu-refcount.h>
  21#include <linux/bit_spinlock.h>
  22#include <linux/shrinker.h>
  23#include <linux/resource.h>
  24#include <linux/page_ext.h>
  25#include <linux/err.h>
  26#include <linux/page_ref.h>
  27#include <linux/memremap.h>
  28
  29struct mempolicy;
  30struct anon_vma;
  31struct anon_vma_chain;
  32struct file_ra_state;
  33struct user_struct;
  34struct writeback_control;
  35struct bdi_writeback;
  36
  37void init_mm_internals(void);
  38
  39#ifndef CONFIG_NEED_MULTIPLE_NODES	/* Don't use mapnrs, do it properly */
  40extern unsigned long max_mapnr;
  41
  42static inline void set_max_mapnr(unsigned long limit)
  43{
  44	max_mapnr = limit;
  45}
  46#else
  47static inline void set_max_mapnr(unsigned long limit) { }
  48#endif
  49
  50extern unsigned long totalram_pages;
  51extern void * high_memory;
  52extern int page_cluster;
  53
  54#ifdef CONFIG_SYSCTL
  55extern int sysctl_legacy_va_layout;
  56#else
  57#define sysctl_legacy_va_layout 0
  58#endif
  59
  60#ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
  61extern const int mmap_rnd_bits_min;
  62extern const int mmap_rnd_bits_max;
  63extern int mmap_rnd_bits __read_mostly;
  64#endif
  65#ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
  66extern const int mmap_rnd_compat_bits_min;
  67extern const int mmap_rnd_compat_bits_max;
  68extern int mmap_rnd_compat_bits __read_mostly;
  69#endif
  70
  71#include <asm/page.h>
  72#include <asm/pgtable.h>
  73#include <asm/processor.h>
  74
  75#ifndef __pa_symbol
  76#define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
  77#endif
  78
  79#ifndef page_to_virt
  80#define page_to_virt(x)	__va(PFN_PHYS(page_to_pfn(x)))
  81#endif
  82
  83#ifndef lm_alias
  84#define lm_alias(x)	__va(__pa_symbol(x))
  85#endif
  86
  87/*
   88 * To prevent common memory management code from establishing
   89 * a zero page mapping on a read fault.
  90 * This macro should be defined within <asm/pgtable.h>.
  91 * s390 does this to prevent multiplexing of hardware bits
  92 * related to the physical page in case of virtualization.
  93 */
  94#ifndef mm_forbids_zeropage
  95#define mm_forbids_zeropage(X)	(0)
  96#endif
  97
  98/*
  99 * On some architectures it is expensive to call memset() for small sizes.
 100 * Those architectures should provide their own implementation of "struct page"
 101 * zeroing by defining this macro in <asm/pgtable.h>.
 102 */
 103#ifndef mm_zero_struct_page
 104#define mm_zero_struct_page(pp)  ((void)memset((pp), 0, sizeof(struct page)))
 105#endif
 106
 107/*
  108 * Default maximum number of active map areas; this limits the number of vmas
  109 * per mm struct. Users can overwrite this number via sysctl, but there is a
  110 * problem.
  111 *
  112 * When a program's coredump is generated in ELF format, one section is created
  113 * per vma. In ELF, the number of sections is represented as an unsigned short,
  114 * so the number of sections must be smaller than 65535 at coredump time.
  115 * Because the kernel adds some informative sections to the image of a program
  116 * when generating a coredump, we need some margin. The number of extra sections
  117 * is currently 1-3, depending on the arch, so we use "5" as a safe margin here.
  118 *
  119 * ELF extended numbering allows more than 65535 sections, so the 16-bit bound
  120 * is no longer a hard limit, although some userspace tools can be surprised by
  121 * it.
 122 */
 123#define MAPCOUNT_ELF_CORE_MARGIN	(5)
 124#define DEFAULT_MAX_MAP_COUNT	(USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
 125
 126extern int sysctl_max_map_count;
 127
 128extern unsigned long sysctl_user_reserve_kbytes;
 129extern unsigned long sysctl_admin_reserve_kbytes;
 130
 131extern int sysctl_overcommit_memory;
 132extern int sysctl_overcommit_ratio;
 133extern unsigned long sysctl_overcommit_kbytes;
 134
 135extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
 136				    size_t *, loff_t *);
 137extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
 138				    size_t *, loff_t *);
 139
 140#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 141
 142/* to align the pointer to the (next) page boundary */
 143#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
 144
 145/* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
 146#define PAGE_ALIGNED(addr)	IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)
 147
 148/*
 149 * Linux kernel virtual memory manager primitives.
 150 * The idea being to have a "virtual" mm in the same way
 151 * we have a virtual fs - giving a cleaner interface to the
 152 * mm details, and allowing different kinds of memory mappings
 153 * (from shared memory to executable loading to arbitrary
 154 * mmap() functions).
 155 */
 156
 157extern struct kmem_cache *vm_area_cachep;
 158
 159#ifndef CONFIG_MMU
 160extern struct rb_root nommu_region_tree;
 161extern struct rw_semaphore nommu_region_sem;
 162
 163extern unsigned int kobjsize(const void *objp);
 164#endif
 165
 166/*
 167 * vm_flags in vm_area_struct, see mm_types.h.
 168 * When changing, update also include/trace/events/mmflags.h
 169 */
 170#define VM_NONE		0x00000000
 171
 172#define VM_READ		0x00000001	/* currently active flags */
 173#define VM_WRITE	0x00000002
 174#define VM_EXEC		0x00000004
 175#define VM_SHARED	0x00000008
 176
 177/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
 178#define VM_MAYREAD	0x00000010	/* limits for mprotect() etc */
 179#define VM_MAYWRITE	0x00000020
 180#define VM_MAYEXEC	0x00000040
 181#define VM_MAYSHARE	0x00000080
 182
 183#define VM_GROWSDOWN	0x00000100	/* general info on the segment */
 184#define VM_UFFD_MISSING	0x00000200	/* missing pages tracking */
 185#define VM_PFNMAP	0x00000400	/* Page-ranges managed without "struct page", just pure PFN */
 186#define VM_DENYWRITE	0x00000800	/* ETXTBSY on write attempts.. */
 187#define VM_UFFD_WP	0x00001000	/* wrprotect pages tracking */
 188
 189#define VM_LOCKED	0x00002000
 190#define VM_IO           0x00004000	/* Memory mapped I/O or similar */
 191
 192					/* Used by sys_madvise() */
 193#define VM_SEQ_READ	0x00008000	/* App will access data sequentially */
 194#define VM_RAND_READ	0x00010000	/* App will not benefit from clustered reads */
 195
 196#define VM_DONTCOPY	0x00020000      /* Do not copy this vma on fork */
 197#define VM_DONTEXPAND	0x00040000	/* Cannot expand with mremap() */
 198#define VM_LOCKONFAULT	0x00080000	/* Lock the pages covered when they are faulted in */
 199#define VM_ACCOUNT	0x00100000	/* Is a VM accounted object */
 200#define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 201#define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 202#define VM_SYNC		0x00800000	/* Synchronous page faults */
 203#define VM_ARCH_1	0x01000000	/* Architecture-specific flag */
 204#define VM_WIPEONFORK	0x02000000	/* Wipe VMA contents in child. */
 205#define VM_DONTDUMP	0x04000000	/* Do not include in the core dump */
 206
 207#ifdef CONFIG_MEM_SOFT_DIRTY
 208# define VM_SOFTDIRTY	0x08000000	/* Not soft dirty clean area */
 209#else
 210# define VM_SOFTDIRTY	0
 211#endif
 212
 213#define VM_MIXEDMAP	0x10000000	/* Can contain "struct page" and pure PFN pages */
 214#define VM_HUGEPAGE	0x20000000	/* MADV_HUGEPAGE marked this vma */
 215#define VM_NOHUGEPAGE	0x40000000	/* MADV_NOHUGEPAGE marked this vma */
 216#define VM_MERGEABLE	0x80000000	/* KSM may merge identical pages */
 217
 218#ifdef CONFIG_ARCH_USES_HIGH_VMA_FLAGS
 219#define VM_HIGH_ARCH_BIT_0	32	/* bit only usable on 64-bit architectures */
 220#define VM_HIGH_ARCH_BIT_1	33	/* bit only usable on 64-bit architectures */
 221#define VM_HIGH_ARCH_BIT_2	34	/* bit only usable on 64-bit architectures */
 222#define VM_HIGH_ARCH_BIT_3	35	/* bit only usable on 64-bit architectures */
 223#define VM_HIGH_ARCH_BIT_4	36	/* bit only usable on 64-bit architectures */
 224#define VM_HIGH_ARCH_0	BIT(VM_HIGH_ARCH_BIT_0)
 225#define VM_HIGH_ARCH_1	BIT(VM_HIGH_ARCH_BIT_1)
 226#define VM_HIGH_ARCH_2	BIT(VM_HIGH_ARCH_BIT_2)
 227#define VM_HIGH_ARCH_3	BIT(VM_HIGH_ARCH_BIT_3)
 228#define VM_HIGH_ARCH_4	BIT(VM_HIGH_ARCH_BIT_4)
 229#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
 230
 231#if defined(CONFIG_X86)
 232# define VM_PAT		VM_ARCH_1	/* PAT reserves whole VMA at once (x86) */
 233#if defined (CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS)
 234# define VM_PKEY_SHIFT	VM_HIGH_ARCH_BIT_0
 235# define VM_PKEY_BIT0	VM_HIGH_ARCH_0	/* A protection key is a 4-bit value */
 236# define VM_PKEY_BIT1	VM_HIGH_ARCH_1
 237# define VM_PKEY_BIT2	VM_HIGH_ARCH_2
 238# define VM_PKEY_BIT3	VM_HIGH_ARCH_3
 239#endif
 240#elif defined(CONFIG_PPC)
 241# define VM_SAO		VM_ARCH_1	/* Strong Access Ordering (powerpc) */
 242#elif defined(CONFIG_PARISC)
 243# define VM_GROWSUP	VM_ARCH_1
 244#elif defined(CONFIG_IA64)
 245# define VM_GROWSUP	VM_ARCH_1
 246#elif defined(CONFIG_SPARC64)
 247# define VM_SPARC_ADI	VM_ARCH_1	/* Uses ADI tag for access control */
 248# define VM_ARCH_CLEAR	VM_SPARC_ADI
 249#elif !defined(CONFIG_MMU)
 250# define VM_MAPPED_COPY	VM_ARCH_1	/* T if mapped copy of data (nommu mmap) */
 251#endif
 252
 253#if defined(CONFIG_X86_INTEL_MPX)
 254/* MPX specific bounds table or bounds directory */
 255# define VM_MPX		VM_HIGH_ARCH_4
 256#else
 257# define VM_MPX		VM_NONE
 258#endif
 259
 260#ifndef VM_GROWSUP
 261# define VM_GROWSUP	VM_NONE
 262#endif
 263
 264/* Bits set in the VMA until the stack is in its final location */
 265#define VM_STACK_INCOMPLETE_SETUP	(VM_RAND_READ | VM_SEQ_READ)
 266
 267#ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 268#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
 269#endif
 270
 271#ifdef CONFIG_STACK_GROWSUP
 272#define VM_STACK	VM_GROWSUP
 273#else
 274#define VM_STACK	VM_GROWSDOWN
 275#endif
 276
 277#define VM_STACK_FLAGS	(VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
 278
 279/*
 280 * Special vmas that are non-mergable, non-mlock()able.
 281 * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
 282 */
 283#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
 284
 285/* This mask defines which mm->def_flags a process can inherit its parent */
 286#define VM_INIT_DEF_MASK	VM_NOHUGEPAGE
 287
 288/* This mask is used to clear all the VMA flags used by mlock */
 289#define VM_LOCKED_CLEAR_MASK	(~(VM_LOCKED | VM_LOCKONFAULT))
 290
 291/* Arch-specific flags to clear when updating VM flags on protection change */
 292#ifndef VM_ARCH_CLEAR
 293# define VM_ARCH_CLEAR	VM_NONE
 294#endif
 295#define VM_FLAGS_CLEAR	(ARCH_VM_PKEY_FLAGS | VM_ARCH_CLEAR)
 296
 297/*
 298 * mapping from the currently active vm_flags protection bits (the
 299 * low four bits) to a page protection mask..
 300 */
 301extern pgprot_t protection_map[16];
 302
 303#define FAULT_FLAG_WRITE	0x01	/* Fault was a write access */
 304#define FAULT_FLAG_MKWRITE	0x02	/* Fault was mkwrite of existing pte */
 305#define FAULT_FLAG_ALLOW_RETRY	0x04	/* Retry fault if blocking */
 306#define FAULT_FLAG_RETRY_NOWAIT	0x08	/* Don't drop mmap_sem and wait when retrying */
 307#define FAULT_FLAG_KILLABLE	0x10	/* The fault task is in SIGKILL killable region */
 308#define FAULT_FLAG_TRIED	0x20	/* Second try */
 309#define FAULT_FLAG_USER		0x40	/* The fault originated in userspace */
 310#define FAULT_FLAG_REMOTE	0x80	/* faulting for non current tsk/mm */
 311#define FAULT_FLAG_INSTRUCTION  0x100	/* The fault was during an instruction fetch */
 312
 313#define FAULT_FLAG_TRACE \
 314	{ FAULT_FLAG_WRITE,		"WRITE" }, \
 315	{ FAULT_FLAG_MKWRITE,		"MKWRITE" }, \
 316	{ FAULT_FLAG_ALLOW_RETRY,	"ALLOW_RETRY" }, \
 317	{ FAULT_FLAG_RETRY_NOWAIT,	"RETRY_NOWAIT" }, \
 318	{ FAULT_FLAG_KILLABLE,		"KILLABLE" }, \
 319	{ FAULT_FLAG_TRIED,		"TRIED" }, \
 320	{ FAULT_FLAG_USER,		"USER" }, \
 321	{ FAULT_FLAG_REMOTE,		"REMOTE" }, \
 322	{ FAULT_FLAG_INSTRUCTION,	"INSTRUCTION" }
 323
 324/*
  325 * vm_fault is filled by the pagefault handler and passed to the vma's
 326 * ->fault function. The vma's ->fault is responsible for returning a bitmask
 327 * of VM_FAULT_xxx flags that give details about how the fault was handled.
 328 *
 329 * MM layer fills up gfp_mask for page allocations but fault handler might
 330 * alter it if its implementation requires a different allocation context.
 331 *
 332 * pgoff should be used in favour of virtual_address, if possible.
 333 */
 334struct vm_fault {
 335	struct vm_area_struct *vma;	/* Target VMA */
 336	unsigned int flags;		/* FAULT_FLAG_xxx flags */
 337	gfp_t gfp_mask;			/* gfp mask to be used for allocations */
 338	pgoff_t pgoff;			/* Logical page offset based on vma */
 339	unsigned long address;		/* Faulting virtual address */
 340	pmd_t *pmd;			/* Pointer to pmd entry matching
 341					 * the 'address' */
 342	pud_t *pud;			/* Pointer to pud entry matching
 343					 * the 'address'
 344					 */
 345	pte_t orig_pte;			/* Value of PTE at the time of fault */
 346
 347	struct page *cow_page;		/* Page handler may use for COW fault */
 348	struct mem_cgroup *memcg;	/* Cgroup cow_page belongs to */
 349	struct page *page;		/* ->fault handlers should return a
 350					 * page here, unless VM_FAULT_NOPAGE
 351					 * is set (which is also implied by
 352					 * VM_FAULT_ERROR).
 353					 */
 354	/* These three entries are valid only while holding ptl lock */
 355	pte_t *pte;			/* Pointer to pte entry matching
 356					 * the 'address'. NULL if the page
 357					 * table hasn't been allocated.
 358					 */
 359	spinlock_t *ptl;		/* Page table lock.
 360					 * Protects pte page table if 'pte'
 361					 * is not NULL, otherwise pmd.
 362					 */
 363	pgtable_t prealloc_pte;		/* Pre-allocated pte page table.
 364					 * vm_ops->map_pages() calls
 365					 * alloc_set_pte() from atomic context.
 366					 * do_fault_around() pre-allocates
 367					 * page table to avoid allocation from
 368					 * atomic context.
 369					 */
 370};
 371
 372/* page entry size for vm->huge_fault() */
 373enum page_entry_size {
 374	PE_SIZE_PTE = 0,
 375	PE_SIZE_PMD,
 376	PE_SIZE_PUD,
 377};
 378
 379/*
 380 * These are the virtual MM functions - opening of an area, closing and
 381 * unmapping it (needed to keep files on disk up-to-date etc), pointer
 382 * to the functions called when a no-page or a wp-page exception occurs. 
 383 */
 384struct vm_operations_struct {
 385	void (*open)(struct vm_area_struct * area);
 386	void (*close)(struct vm_area_struct * area);
 387	int (*split)(struct vm_area_struct * area, unsigned long addr);
 388	int (*mremap)(struct vm_area_struct * area);
 389	vm_fault_t (*fault)(struct vm_fault *vmf);
 390	vm_fault_t (*huge_fault)(struct vm_fault *vmf,
 391			enum page_entry_size pe_size);
 392	void (*map_pages)(struct vm_fault *vmf,
 393			pgoff_t start_pgoff, pgoff_t end_pgoff);
 394	unsigned long (*pagesize)(struct vm_area_struct * area);
 395
  396	/* notification that a previously read-only page is about to become
  397	 * writable; if an error is returned it will cause a SIGBUS */
 398	vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
 399
 400	/* same as page_mkwrite when using VM_PFNMAP|VM_MIXEDMAP */
 401	vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
 402
 403	/* called by access_process_vm when get_user_pages() fails, typically
 404	 * for use by special VMAs that can switch between memory and hardware
 405	 */
 406	int (*access)(struct vm_area_struct *vma, unsigned long addr,
 407		      void *buf, int len, int write);
 408
 409	/* Called by the /proc/PID/maps code to ask the vma whether it
 410	 * has a special name.  Returning non-NULL will also cause this
 411	 * vma to be dumped unconditionally. */
 412	const char *(*name)(struct vm_area_struct *vma);
 413
 414#ifdef CONFIG_NUMA
 415	/*
 416	 * set_policy() op must add a reference to any non-NULL @new mempolicy
 417	 * to hold the policy upon return.  Caller should pass NULL @new to
 418	 * remove a policy and fall back to surrounding context--i.e. do not
 419	 * install a MPOL_DEFAULT policy, nor the task or system default
 420	 * mempolicy.
 421	 */
 422	int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
 423
 424	/*
 425	 * get_policy() op must add reference [mpol_get()] to any policy at
 426	 * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
 427	 * in mm/mempolicy.c will do this automatically.
 428	 * get_policy() must NOT add a ref if the policy at (vma,addr) is not
 429	 * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
 430	 * If no [shared/vma] mempolicy exists at the addr, get_policy() op
 431	 * must return NULL--i.e., do not "fallback" to task or system default
 432	 * policy.
 433	 */
 434	struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
 435					unsigned long addr);
 436#endif
 437	/*
 438	 * Called by vm_normal_page() for special PTEs to find the
 439	 * page for @addr.  This is useful if the default behavior
 440	 * (using pte_page()) would not find the correct page.
 441	 */
 442	struct page *(*find_special_page)(struct vm_area_struct *vma,
 443					  unsigned long addr);
 444};
 445
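/*
 * Illustrative sketch (not part of this header): a minimal
 * vm_operations_struct for a driver that only implements ->fault and hands
 * back a freshly allocated zeroed page. my_fault/my_vm_ops are invented
 * names; a real driver would wire this up from its ->mmap handler.
 */
#if 0	/* example only */
static vm_fault_t my_fault(struct vm_fault *vmf)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return VM_FAULT_OOM;
	vmf->page = page;	/* core MM maps it at vmf->address */
	return 0;
}

static const struct vm_operations_struct my_vm_ops = {
	.fault	= my_fault,
};
#endif
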
 446struct mmu_gather;
 447struct inode;
 448
 449#define page_private(page)		((page)->private)
 450#define set_page_private(page, v)	((page)->private = (v))
 451
 452#if !defined(__HAVE_ARCH_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
 453static inline int pmd_devmap(pmd_t pmd)
 454{
 455	return 0;
 456}
 457static inline int pud_devmap(pud_t pud)
 458{
 459	return 0;
 460}
 461static inline int pgd_devmap(pgd_t pgd)
 462{
 463	return 0;
 464}
 465#endif
 466
 467/*
 468 * FIXME: take this include out, include page-flags.h in
 469 * files which need it (119 of them)
 470 */
 471#include <linux/page-flags.h>
 472#include <linux/huge_mm.h>
 473
 474/*
 475 * Methods to modify the page usage count.
 476 *
 477 * What counts for a page usage:
 478 * - cache mapping   (page->mapping)
 479 * - private data    (page->private)
 480 * - page mapped in a task's page tables, each mapping
 481 *   is counted separately
 482 *
 483 * Also, many kernel routines increase the page count before a critical
 484 * routine so they can be sure the page doesn't go away from under them.
 485 */
 486
 487/*
 488 * Drop a ref, return true if the refcount fell to zero (the page has no users)
 489 */
 490static inline int put_page_testzero(struct page *page)
 491{
 492	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 493	return page_ref_dec_and_test(page);
 494}
 495
 496/*
  497 * Try to grab a ref unless the page has a refcount of zero; return false if
  498 * that is the case.
 499 * This can be called when MMU is off so it must not access
 500 * any of the virtual mappings.
 501 */
 502static inline int get_page_unless_zero(struct page *page)
 503{
 504	return page_ref_add_unless(page, 1, 0);
 505}
 506
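/*
 * Illustrative sketch (not part of this header): the usual lockless pattern
 * around get_page_unless_zero() is "speculatively take a reference, then
 * re-check that the page is still what was expected". try_grab_pfn() is an
 * invented name and the re-check is left to the caller.
 */
#if 0	/* example only */
static struct page *try_grab_pfn(unsigned long pfn)
{
	struct page *page;

	if (!pfn_valid(pfn))
		return NULL;
	page = pfn_to_page(pfn);
	if (!get_page_unless_zero(page))
		return NULL;	/* page was free; no reference taken */
	/* caller re-checks page state and uses put_page() if it changed */
	return page;
}
#endif
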
 507extern int page_is_ram(unsigned long pfn);
 508
 509enum {
 510	REGION_INTERSECTS,
 511	REGION_DISJOINT,
 512	REGION_MIXED,
 513};
 514
 515int region_intersects(resource_size_t offset, size_t size, unsigned long flags,
 516		      unsigned long desc);
 517
 518/* Support for virtually mapped pages */
 519struct page *vmalloc_to_page(const void *addr);
 520unsigned long vmalloc_to_pfn(const void *addr);
 521
 522/*
 523 * Determine if an address is within the vmalloc range
 524 *
 525 * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
 526 * is no special casing required.
 527 */
 528static inline bool is_vmalloc_addr(const void *x)
 529{
 530#ifdef CONFIG_MMU
 531	unsigned long addr = (unsigned long)x;
 532
 533	return addr >= VMALLOC_START && addr < VMALLOC_END;
 534#else
 535	return false;
 536#endif
 537}
 538#ifdef CONFIG_MMU
 539extern int is_vmalloc_or_module_addr(const void *x);
 540#else
 541static inline int is_vmalloc_or_module_addr(const void *x)
 542{
 543	return 0;
 544}
 545#endif
 546
 547extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
 548static inline void *kvmalloc(size_t size, gfp_t flags)
 549{
 550	return kvmalloc_node(size, flags, NUMA_NO_NODE);
 551}
 552static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
 553{
 554	return kvmalloc_node(size, flags | __GFP_ZERO, node);
 555}
 556static inline void *kvzalloc(size_t size, gfp_t flags)
 557{
 558	return kvmalloc(size, flags | __GFP_ZERO);
 559}
 560
 561static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
 562{
 563	if (size != 0 && n > SIZE_MAX / size)
 564		return NULL;
 565
 566	return kvmalloc(n * size, flags);
 567}
 568
 569extern void kvfree(const void *addr);
 570
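/*
 * Illustrative sketch (not part of this header): kvmalloc_array() tries
 * kmalloc() first and falls back to vmalloc() for larger allocations, so the
 * result must always be released with kvfree(), never kfree(). The struct
 * and helper names are invented for the example.
 */
#if 0	/* example only */
struct sample { u64 key; u64 val; };

static struct sample *alloc_samples(size_t n)
{
	return kvmalloc_array(n, sizeof(struct sample),
			      GFP_KERNEL | __GFP_ZERO);
}

static void free_samples(struct sample *samples)
{
	kvfree(samples);	/* correct for both kmalloc and vmalloc backing */
}
#endif
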
 571static inline atomic_t *compound_mapcount_ptr(struct page *page)
 572{
 573	return &page[1].compound_mapcount;
 574}
 575
 576static inline int compound_mapcount(struct page *page)
 577{
 578	VM_BUG_ON_PAGE(!PageCompound(page), page);
 579	page = compound_head(page);
 580	return atomic_read(compound_mapcount_ptr(page)) + 1;
 581}
 582
 583/*
 584 * The atomic page->_mapcount, starts from -1: so that transitions
 585 * both from it and to it can be tracked, using atomic_inc_and_test
 586 * and atomic_add_negative(-1).
 587 */
 588static inline void page_mapcount_reset(struct page *page)
 589{
 590	atomic_set(&(page)->_mapcount, -1);
 591}
 592
 593int __page_mapcount(struct page *page);
 594
 595static inline int page_mapcount(struct page *page)
 596{
 597	VM_BUG_ON_PAGE(PageSlab(page), page);
 598
 599	if (unlikely(PageCompound(page)))
 600		return __page_mapcount(page);
 601	return atomic_read(&page->_mapcount) + 1;
 602}
 603
 604#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 605int total_mapcount(struct page *page);
 606int page_trans_huge_mapcount(struct page *page, int *total_mapcount);
 607#else
 608static inline int total_mapcount(struct page *page)
 609{
 610	return page_mapcount(page);
 611}
 612static inline int page_trans_huge_mapcount(struct page *page,
 613					   int *total_mapcount)
 614{
 615	int mapcount = page_mapcount(page);
 616	if (total_mapcount)
 617		*total_mapcount = mapcount;
 618	return mapcount;
 619}
 620#endif
 621
 622static inline struct page *virt_to_head_page(const void *x)
 623{
 624	struct page *page = virt_to_page(x);
 625
 626	return compound_head(page);
 627}
 628
 629void __put_page(struct page *page);
 630
 
 631void put_pages_list(struct list_head *pages);
 632
 633void split_page(struct page *page, unsigned int order);
 
 634
 635/*
 636 * Compound pages have a destructor function.  Provide a
 637 * prototype for that function and accessor functions.
 638 * These are _only_ valid on the head of a compound page.
 639 */
 640typedef void compound_page_dtor(struct page *);
 641
 642/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
 643enum compound_dtor_id {
 644	NULL_COMPOUND_DTOR,
 645	COMPOUND_PAGE_DTOR,
 646#ifdef CONFIG_HUGETLB_PAGE
 647	HUGETLB_PAGE_DTOR,
 648#endif
 649#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 650	TRANSHUGE_PAGE_DTOR,
 651#endif
 652	NR_COMPOUND_DTORS,
 653};
 654extern compound_page_dtor * const compound_page_dtors[];
 655
 656static inline void set_compound_page_dtor(struct page *page,
 657		enum compound_dtor_id compound_dtor)
 658{
 659	VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page);
 660	page[1].compound_dtor = compound_dtor;
 661}
 662
 663static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
 664{
 665	VM_BUG_ON_PAGE(page[1].compound_dtor >= NR_COMPOUND_DTORS, page);
 666	return compound_page_dtors[page[1].compound_dtor];
 667}
 668
 669static inline unsigned int compound_order(struct page *page)
 670{
 671	if (!PageHead(page))
 672		return 0;
 673	return page[1].compound_order;
 674}
 675
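/*
 * Illustrative sketch (not part of this header): compound_order() reads the
 * order stored in the first tail page and returns 0 for non-head pages, so
 * the number of base pages covered by a compound page follows directly.
 */
#if 0	/* example only */
static unsigned long compound_nr_pages(struct page *page)
{
	return 1UL << compound_order(page);	/* 1 for a base (non-head) page */
}
#endif
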
 676static inline void set_compound_order(struct page *page, unsigned int order)
 677{
 678	page[1].compound_order = order;
 679}
 680
 681void free_compound_page(struct page *page);
 682
 683#ifdef CONFIG_MMU
 684/*
 685 * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
 686 * servicing faults for write access.  In the normal case, do always want
 687 * pte_mkwrite.  But get_user_pages can cause write faults for mappings
 688 * that do not have writing enabled, when used by access_process_vm.
 689 */
 690static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
 691{
 692	if (likely(vma->vm_flags & VM_WRITE))
 693		pte = pte_mkwrite(pte);
 694	return pte;
 695}
 696
 697int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 698		struct page *page);
 699int finish_fault(struct vm_fault *vmf);
 700int finish_mkwrite_fault(struct vm_fault *vmf);
 701#endif
 702
 703/*
 704 * Multiple processes may "see" the same page. E.g. for untouched
 705 * mappings of /dev/null, all processes see the same page full of
 706 * zeroes, and text pages of executables and shared libraries have
 707 * only one copy in memory, at most, normally.
 708 *
 709 * For the non-reserved pages, page_count(page) denotes a reference count.
 710 *   page_count() == 0 means the page is free. page->lru is then used for
 711 *   freelist management in the buddy allocator.
 712 *   page_count() > 0  means the page has been allocated.
 713 *
 714 * Pages are allocated by the slab allocator in order to provide memory
 715 * to kmalloc and kmem_cache_alloc. In this case, the management of the
 716 * page, and the fields in 'struct page' are the responsibility of mm/slab.c
 717 * unless a particular usage is carefully commented. (the responsibility of
 718 * freeing the kmalloc memory is the caller's, of course).
 719 *
 720 * A page may be used by anyone else who does a __get_free_page().
 721 * In this case, page_count still tracks the references, and should only
 722 * be used through the normal accessor functions. The top bits of page->flags
 723 * and page->virtual store page management information, but all other fields
 724 * are unused and could be used privately, carefully. The management of this
 725 * page is the responsibility of the one who allocated it, and those who have
 726 * subsequently been given references to it.
 727 *
 728 * The other pages (we may call them "pagecache pages") are completely
 729 * managed by the Linux memory manager: I/O, buffers, swapping etc.
 730 * The following discussion applies only to them.
 731 *
 732 * A pagecache page contains an opaque `private' member, which belongs to the
 733 * page's address_space. Usually, this is the address of a circular list of
 734 * the page's disk buffers. PG_private must be set to tell the VM to call
 735 * into the filesystem to release these pages.
 736 *
 737 * A page may belong to an inode's memory mapping. In this case, page->mapping
 738 * is the pointer to the inode, and page->index is the file offset of the page,
 739 * in units of PAGE_SIZE.
 740 *
 741 * If pagecache pages are not associated with an inode, they are said to be
 742 * anonymous pages. These may become associated with the swapcache, and in that
 743 * case PG_swapcache is set, and page->private is an offset into the swapcache.
 744 *
 745 * In either case (swapcache or inode backed), the pagecache itself holds one
 746 * reference to the page. Setting PG_private should also increment the
  747 * refcount. Each user mapping also has a reference to the page.
 748 *
 749 * The pagecache pages are stored in a per-mapping radix tree, which is
 750 * rooted at mapping->i_pages, and indexed by offset.
 751 * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
 752 * lists, we instead now tag pages as dirty/writeback in the radix tree.
 753 *
 754 * All pagecache pages may be subject to I/O:
 755 * - inode pages may need to be read from disk,
 756 * - inode pages which have been modified and are MAP_SHARED may need
 757 *   to be written back to the inode on disk,
 758 * - anonymous pages (including MAP_PRIVATE file mappings) which have been
 759 *   modified may need to be swapped out to swap space and (later) to be read
 760 *   back into memory.
 761 */
 762
 763/*
 764 * The zone field is never updated after free_area_init_core()
 765 * sets it, so none of the operations on it need to be atomic.
 766 */
 767
 768/* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
 769#define SECTIONS_PGOFF		((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
 770#define NODES_PGOFF		(SECTIONS_PGOFF - NODES_WIDTH)
 771#define ZONES_PGOFF		(NODES_PGOFF - ZONES_WIDTH)
 772#define LAST_CPUPID_PGOFF	(ZONES_PGOFF - LAST_CPUPID_WIDTH)
 773
 774/*
 775 * Define the bit shifts to access each section.  For non-existent
 776 * sections we define the shift as 0; that plus a 0 mask ensures
 777 * the compiler will optimise away reference to them.
 778 */
 779#define SECTIONS_PGSHIFT	(SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
 780#define NODES_PGSHIFT		(NODES_PGOFF * (NODES_WIDTH != 0))
 781#define ZONES_PGSHIFT		(ZONES_PGOFF * (ZONES_WIDTH != 0))
 782#define LAST_CPUPID_PGSHIFT	(LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
 783
 784/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
 785#ifdef NODE_NOT_IN_PAGE_FLAGS
 786#define ZONEID_SHIFT		(SECTIONS_SHIFT + ZONES_SHIFT)
 787#define ZONEID_PGOFF		((SECTIONS_PGOFF < ZONES_PGOFF)? \
 788						SECTIONS_PGOFF : ZONES_PGOFF)
 789#else
 790#define ZONEID_SHIFT		(NODES_SHIFT + ZONES_SHIFT)
 791#define ZONEID_PGOFF		((NODES_PGOFF < ZONES_PGOFF)? \
 792						NODES_PGOFF : ZONES_PGOFF)
 793#endif
 794
 795#define ZONEID_PGSHIFT		(ZONEID_PGOFF * (ZONEID_SHIFT != 0))
 796
 797#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 798#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
 799#endif
 800
 801#define ZONES_MASK		((1UL << ZONES_WIDTH) - 1)
 802#define NODES_MASK		((1UL << NODES_WIDTH) - 1)
 803#define SECTIONS_MASK		((1UL << SECTIONS_WIDTH) - 1)
 804#define LAST_CPUPID_MASK	((1UL << LAST_CPUPID_SHIFT) - 1)
 805#define ZONEID_MASK		((1UL << ZONEID_SHIFT) - 1)
 806
 807static inline enum zone_type page_zonenum(const struct page *page)
 808{
 809	return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 810}
 811
 812#ifdef CONFIG_ZONE_DEVICE
 813static inline bool is_zone_device_page(const struct page *page)
 814{
 815	return page_zonenum(page) == ZONE_DEVICE;
 816}
 817#else
 818static inline bool is_zone_device_page(const struct page *page)
 819{
 820	return false;
 821}
 822#endif
 823
 824#if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
 825void put_zone_device_private_or_public_page(struct page *page);
 826DECLARE_STATIC_KEY_FALSE(device_private_key);
 827#define IS_HMM_ENABLED static_branch_unlikely(&device_private_key)
 828static inline bool is_device_private_page(const struct page *page);
 829static inline bool is_device_public_page(const struct page *page);
 830#else /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
 831static inline void put_zone_device_private_or_public_page(struct page *page)
 832{
 833}
 834#define IS_HMM_ENABLED 0
 835static inline bool is_device_private_page(const struct page *page)
 836{
 837	return false;
 838}
 839static inline bool is_device_public_page(const struct page *page)
 840{
 841	return false;
 842}
 843#endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
 844
 845
 846static inline void get_page(struct page *page)
 847{
 848	page = compound_head(page);
 849	/*
 850	 * Getting a normal page or the head of a compound page
 851	 * requires to already have an elevated page->_refcount.
 852	 */
 853	VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
 854	page_ref_inc(page);
 855}
 856
 857static inline void put_page(struct page *page)
 858{
 859	page = compound_head(page);
 860
 861	/*
 862	 * For private device pages we need to catch refcount transition from
 863	 * 2 to 1, when refcount reach one it means the private device page is
 864	 * free and we need to inform the device driver through callback. See
 865	 * include/linux/memremap.h and HMM for details.
 866	 */
 867	if (IS_HMM_ENABLED && unlikely(is_device_private_page(page) ||
 868	    unlikely(is_device_public_page(page)))) {
 869		put_zone_device_private_or_public_page(page);
 870		return;
 871	}
 872
 873	if (put_page_testzero(page))
 874		__put_page(page);
 875}
 876
 877#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
 878#define SECTION_IN_PAGE_FLAGS
 879#endif
 880
 881/*
 882 * The identification function is mainly used by the buddy allocator for
 883 * determining if two pages could be buddies. We are not really identifying
 884 * the zone since we could be using the section number id if we do not have
 885 * node id available in page flags.
 886 * We only guarantee that it will return the same value for two combinable
 887 * pages in a zone.
 888 */
 889static inline int page_zone_id(struct page *page)
 890{
 891	return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
 892}
 893
 894static inline int zone_to_nid(struct zone *zone)
 895{
 896#ifdef CONFIG_NUMA
 897	return zone->node;
 898#else
 899	return 0;
 900#endif
 901}
 902
 903#ifdef NODE_NOT_IN_PAGE_FLAGS
 904extern int page_to_nid(const struct page *page);
 905#else
 906static inline int page_to_nid(const struct page *page)
 907{
 908	struct page *p = (struct page *)page;
 909
 910	return (PF_POISONED_CHECK(p)->flags >> NODES_PGSHIFT) & NODES_MASK;
 911}
 912#endif
 913
 914#ifdef CONFIG_NUMA_BALANCING
 915static inline int cpu_pid_to_cpupid(int cpu, int pid)
 916{
 917	return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
 918}
 919
 920static inline int cpupid_to_pid(int cpupid)
 921{
 922	return cpupid & LAST__PID_MASK;
 923}
 924
 925static inline int cpupid_to_cpu(int cpupid)
 926{
 927	return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
 928}
 929
 930static inline int cpupid_to_nid(int cpupid)
 931{
 932	return cpu_to_node(cpupid_to_cpu(cpupid));
 933}
 934
 935static inline bool cpupid_pid_unset(int cpupid)
 936{
 937	return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
 938}
 939
 940static inline bool cpupid_cpu_unset(int cpupid)
 941{
 942	return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
 943}
 944
 945static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
 946{
 947	return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
 948}
 949
 950#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
 951#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
 952static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 953{
 954	return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
 955}
 956
 957static inline int page_cpupid_last(struct page *page)
 958{
 959	return page->_last_cpupid;
 960}
 961static inline void page_cpupid_reset_last(struct page *page)
 962{
 963	page->_last_cpupid = -1 & LAST_CPUPID_MASK;
 964}
 965#else
 966static inline int page_cpupid_last(struct page *page)
 967{
 968	return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
 969}
 970
 971extern int page_cpupid_xchg_last(struct page *page, int cpupid);
 972
 973static inline void page_cpupid_reset_last(struct page *page)
 974{
 975	page->flags |= LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT;
 976}
 977#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
 978#else /* !CONFIG_NUMA_BALANCING */
 979static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
 980{
 981	return page_to_nid(page); /* XXX */
 982}
 983
 984static inline int page_cpupid_last(struct page *page)
 985{
 986	return page_to_nid(page); /* XXX */
 987}
 988
 989static inline int cpupid_to_nid(int cpupid)
 990{
 991	return -1;
 992}
 993
 994static inline int cpupid_to_pid(int cpupid)
 995{
 996	return -1;
 997}
 998
 999static inline int cpupid_to_cpu(int cpupid)
1000{
1001	return -1;
1002}
1003
1004static inline int cpu_pid_to_cpupid(int nid, int pid)
1005{
1006	return -1;
1007}
1008
1009static inline bool cpupid_pid_unset(int cpupid)
1010{
1011	return 1;
1012}
1013
1014static inline void page_cpupid_reset_last(struct page *page)
1015{
1016}
1017
1018static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
1019{
1020	return false;
1021}
1022#endif /* CONFIG_NUMA_BALANCING */
1023
1024static inline struct zone *page_zone(const struct page *page)
1025{
1026	return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
1027}
1028
1029static inline pg_data_t *page_pgdat(const struct page *page)
1030{
1031	return NODE_DATA(page_to_nid(page));
1032}
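
/*
 * Illustrative example: zone and node information is derived from
 * page->flags (plus NODE_DATA()), so for any valid struct page:
 *
 *	struct zone *zone = page_zone(page);
 *	pg_data_t *pgdat = page_pgdat(page);
 *	int nid = page_to_nid(page);
 */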
1033
1034#ifdef SECTION_IN_PAGE_FLAGS
1035static inline void set_page_section(struct page *page, unsigned long section)
1036{
1037	page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
1038	page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
1039}
1040
1041static inline unsigned long page_to_section(const struct page *page)
1042{
1043	return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
1044}
1045#endif
1046
1047static inline void set_page_zone(struct page *page, enum zone_type zone)
1048{
1049	page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
1050	page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
1051}
1052
1053static inline void set_page_node(struct page *page, unsigned long node)
1054{
1055	page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
1056	page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
1057}
1058
1059static inline void set_page_links(struct page *page, enum zone_type zone,
1060	unsigned long node, unsigned long pfn)
1061{
1062	set_page_zone(page, zone);
1063	set_page_node(page, node);
1064#ifdef SECTION_IN_PAGE_FLAGS
1065	set_page_section(page, pfn_to_section_nr(pfn));
1066#endif
1067}
1068
1069#ifdef CONFIG_MEMCG
1070static inline struct mem_cgroup *page_memcg(struct page *page)
1071{
1072	return page->mem_cgroup;
1073}
1074static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1075{
1076	WARN_ON_ONCE(!rcu_read_lock_held());
1077	return READ_ONCE(page->mem_cgroup);
1078}
1079#else
1080static inline struct mem_cgroup *page_memcg(struct page *page)
1081{
1082	return NULL;
1083}
1084static inline struct mem_cgroup *page_memcg_rcu(struct page *page)
1085{
1086	WARN_ON_ONCE(!rcu_read_lock_held());
1087	return NULL;
1088}
1089#endif
1090
1091/*
1092 * Some inline functions in vmstat.h depend on page_zone()
1093 */
1094#include <linux/vmstat.h>
1095
1096static __always_inline void *lowmem_page_address(const struct page *page)
1097{
1098	return page_to_virt(page);
1099}
1100
1101#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
1102#define HASHED_PAGE_VIRTUAL
1103#endif
1104
1105#if defined(WANT_PAGE_VIRTUAL)
1106static inline void *page_address(const struct page *page)
1107{
1108	return page->virtual;
1109}
1110static inline void set_page_address(struct page *page, void *address)
1111{
1112	page->virtual = address;
1113}
1114#define page_address_init()  do { } while(0)
1115#endif
1116
1117#if defined(HASHED_PAGE_VIRTUAL)
1118void *page_address(const struct page *page);
1119void set_page_address(struct page *page, void *virtual);
1120void page_address_init(void);
1121#endif
1122
1123#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
1124#define page_address(page) lowmem_page_address(page)
1125#define set_page_address(page, address)  do { } while(0)
1126#define page_address_init()  do { } while(0)
1127#endif
1128
1129extern void *page_rmapping(struct page *page);
1130extern struct anon_vma *page_anon_vma(struct page *page);
1131extern struct address_space *page_mapping(struct page *page);
1132
1133extern struct address_space *__page_file_mapping(struct page *);
1134
1135static inline
1136struct address_space *page_file_mapping(struct page *page)
1137{
1138	if (unlikely(PageSwapCache(page)))
1139		return __page_file_mapping(page);
1140
1141	return page->mapping;
1142}
1143
1144extern pgoff_t __page_file_index(struct page *page);
1145
1146/*
1147 * Return the pagecache index of the passed page.  Regular pagecache pages
1148 * use ->index whereas swapcache pages use swp_offset(->private)
1149 */
1150static inline pgoff_t page_index(struct page *page)
1151{
1152	if (unlikely(PageSwapCache(page)))
1153		return __page_file_index(page);
1154	return page->index;
1155}
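
/*
 * Illustrative sketch: page_index() and page_file_mapping() hide the
 * swapcache special case, so callers can treat pagecache and swapcache
 * pages uniformly:
 *
 *	pgoff_t idx = page_index(page);
 *	struct address_space *mapping = page_file_mapping(page);
 */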
1156
1157bool page_mapped(struct page *page);
1158struct address_space *page_mapping(struct page *page);
1159struct address_space *page_mapping_file(struct page *page);
1160
1161/*
1162 * Return true only if the page has been allocated with
1163 * ALLOC_NO_WATERMARKS and the low watermark was not
1164 * met, implying that the system is under some pressure.
1165 */
1166static inline bool page_is_pfmemalloc(struct page *page)
1167{
1168	/*
1169	 * Page index cannot be this large so this must be
1170	 * a pfmemalloc page.
1171	 */
1172	return page->index == -1UL;
1173}
1174
1175/*
1176 * Only to be called by the page allocator on a freshly allocated
1177 * page.
1178 */
1179static inline void set_page_pfmemalloc(struct page *page)
1180{
1181	page->index = -1UL;
1182}
1183
1184static inline void clear_page_pfmemalloc(struct page *page)
1185{
1186	page->index = 0;
1187}
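
/*
 * Illustrative sketch, loosely modelled on the networking code: a consumer
 * that must not reuse or cache data backed by the emergency reserves
 * propagates the flag from each page it receives:
 *
 *	if (page_is_pfmemalloc(page))
 *		skb->pfmemalloc = true;
 */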
1188
1189/*
1190 * Different kinds of faults, as returned by handle_mm_fault().
1191 * Used to decide whether a process gets delivered SIGBUS or
1192 * just gets major/minor fault counters bumped up.
1193 */
1194
1195#define VM_FAULT_OOM	0x0001
1196#define VM_FAULT_SIGBUS	0x0002
1197#define VM_FAULT_MAJOR	0x0004
1198#define VM_FAULT_WRITE	0x0008	/* Special case for get_user_pages */
1199#define VM_FAULT_HWPOISON 0x0010	/* Hit poisoned small page */
1200#define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
1201#define VM_FAULT_SIGSEGV 0x0040
1202
1203#define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, did not return a page */
1204#define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
1205#define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
1206#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
1207#define VM_FAULT_DONE_COW   0x1000	/* ->fault has fully handled COW */
1208#define VM_FAULT_NEEDDSYNC  0x2000	/* ->fault did not modify page tables
1209					 * and needs fsync() to complete (for
1210					 * synchronous page faults in DAX) */
1211
1212#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
1213			 VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
1214			 VM_FAULT_FALLBACK)
1215
1216#define VM_FAULT_RESULT_TRACE \
1217	{ VM_FAULT_OOM,			"OOM" }, \
1218	{ VM_FAULT_SIGBUS,		"SIGBUS" }, \
1219	{ VM_FAULT_MAJOR,		"MAJOR" }, \
1220	{ VM_FAULT_WRITE,		"WRITE" }, \
1221	{ VM_FAULT_HWPOISON,		"HWPOISON" }, \
1222	{ VM_FAULT_HWPOISON_LARGE,	"HWPOISON_LARGE" }, \
1223	{ VM_FAULT_SIGSEGV,		"SIGSEGV" }, \
1224	{ VM_FAULT_NOPAGE,		"NOPAGE" }, \
1225	{ VM_FAULT_LOCKED,		"LOCKED" }, \
1226	{ VM_FAULT_RETRY,		"RETRY" }, \
1227	{ VM_FAULT_FALLBACK,		"FALLBACK" }, \
1228	{ VM_FAULT_DONE_COW,		"DONE_COW" }, \
1229	{ VM_FAULT_NEEDDSYNC,		"NEEDDSYNC" }
1230
1231/* Encode hstate index for a hwpoisoned large page */
1232#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
1233#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
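
/*
 * Illustrative example of the encoding above, as a hugetlb fault path might
 * produce it and a caller might consume it (the hstate index is assumed to
 * come from hstate_index()):
 *
 *	ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
 *	...
 *	if (ret & VM_FAULT_HWPOISON_LARGE)
 *		hindex = VM_FAULT_GET_HINDEX(ret);
 */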
1234
1235/*
1236 * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
1237 */
1238extern void pagefault_out_of_memory(void);
1239
1240#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)
1241
1242/*
1243 * Flags passed to show_mem() and show_free_areas() to suppress output in
1244 * various contexts.
1245 */
1246#define SHOW_MEM_FILTER_NODES		(0x0001u)	/* disallowed nodes */
1247
1248extern void show_free_areas(unsigned int flags, nodemask_t *nodemask);
 
1249
1250extern bool can_do_mlock(void);
1251extern int user_shm_lock(size_t, struct user_struct *);
1252extern void user_shm_unlock(size_t, struct user_struct *);
1253
1254/*
1255 * Parameter block passed down to zap_pte_range in exceptional cases.
1256 */
1257struct zap_details {
1258	struct address_space *check_mapping;	/* Check page->mapping if set */
1259	pgoff_t	first_index;			/* Lowest page->index to unmap */
1260	pgoff_t last_index;			/* Highest page->index to unmap */
1261};
1262
1263struct page *_vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
1264			     pte_t pte, bool with_public_device);
1265#define vm_normal_page(vma, addr, pte) _vm_normal_page(vma, addr, pte, false)
1266
1267struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
1268				pmd_t pmd);
1269
1270int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
1271		unsigned long size);
1272void zap_page_range(struct vm_area_struct *vma, unsigned long address,
1273		unsigned long size);
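
/*
 * Illustrative sketch: tearing down every user mapping of a VMA with the
 * helper above (no exceptional zap_details needed):
 *
 *	zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 */
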
1274void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1275		unsigned long start, unsigned long end);
1276
1277/**
1278 * mm_walk - callbacks for walk_page_range
1279 * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
1280 *	       this handler should only handle pud_trans_huge() puds.
1281 *	       the pmd_entry or pte_entry callbacks will be used for
1282 *	       regular PUDs.
1283 * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
1284 *	       this handler is required to be able to handle
1285 *	       pmd_trans_huge() pmds.  They may simply choose to
1286 *	       split_huge_page() instead of handling it explicitly.
1287 * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
1288 * @pte_hole: if set, called for each hole at all levels
1289 * @hugetlb_entry: if set, called for each hugetlb entry
1290 * @test_walk: caller-specific callback to determine whether to walk over
1291 *             the current vma. Returning 0 means "do the page table walk
1292 *             over the current vma", a negative value means "abort the
1293 *             current page table walk right now", and 1 means "skip the
1294 *             current vma".
1295 * @mm:        mm_struct representing the target process of page table walk
1296 * @vma:       vma currently walked (NULL if walking outside vmas)
1297 * @private:   private data for callbacks' usage
1298 *
1299 * (see the comment on walk_page_range() for more details)
1300 */
1301struct mm_walk {
1302	int (*pud_entry)(pud_t *pud, unsigned long addr,
1303			 unsigned long next, struct mm_walk *walk);
1304	int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
1305			 unsigned long next, struct mm_walk *walk);
1306	int (*pte_entry)(pte_t *pte, unsigned long addr,
1307			 unsigned long next, struct mm_walk *walk);
1308	int (*pte_hole)(unsigned long addr, unsigned long next,
1309			struct mm_walk *walk);
1310	int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
1311			     unsigned long addr, unsigned long next,
1312			     struct mm_walk *walk);
1313	int (*test_walk)(unsigned long addr, unsigned long next,
1314			struct mm_walk *walk);
1315	struct mm_struct *mm;
1316	struct vm_area_struct *vma;
1317	void *private;
1318};
1319
1320int walk_page_range(unsigned long addr, unsigned long end,
1321		struct mm_walk *walk);
1322int walk_page_vma(struct vm_area_struct *vma, struct mm_walk *walk);
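
/*
 * Illustrative sketch (hypothetical callback and variable names): counting
 * the present PTEs of a VMA with the walker above:
 *
 *	static int count_pte(pte_t *pte, unsigned long addr,
 *			     unsigned long next, struct mm_walk *walk)
 *	{
 *		unsigned long *count = walk->private;
 *
 *		if (pte_present(*pte))
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	struct mm_walk walk = {
 *		.pte_entry	= count_pte,
 *		.mm		= vma->vm_mm,
 *		.private	= &count,
 *	};
 *	walk_page_vma(vma, &walk);
 */
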
1323void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
1324		unsigned long end, unsigned long floor, unsigned long ceiling);
1325int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
1326			struct vm_area_struct *vma);
1327int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
1328			     unsigned long *start, unsigned long *end,
1329			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
1330int follow_pfn(struct vm_area_struct *vma, unsigned long address,
1331	unsigned long *pfn);
1332int follow_phys(struct vm_area_struct *vma, unsigned long address,
1333		unsigned int flags, unsigned long *prot, resource_size_t *phys);
1334int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
1335			void *buf, int len, int write);
1336
1337extern void truncate_pagecache(struct inode *inode, loff_t new);
1338extern void truncate_setsize(struct inode *inode, loff_t newsize);
1339void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1340void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1341int truncate_inode_page(struct address_space *mapping, struct page *page);
1342int generic_error_remove_page(struct address_space *mapping, struct page *page);
1343int invalidate_inode_page(struct page *page);
1344
1345#ifdef CONFIG_MMU
1346extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
1347		unsigned int flags);
1348extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
1349			    unsigned long address, unsigned int fault_flags,
1350			    bool *unlocked);
1351void unmap_mapping_pages(struct address_space *mapping,
1352		pgoff_t start, pgoff_t nr, bool even_cows);
1353void unmap_mapping_range(struct address_space *mapping,
1354		loff_t const holebegin, loff_t const holelen, int even_cows);
1355#else
1356static inline int handle_mm_fault(struct vm_area_struct *vma,
1357		unsigned long address, unsigned int flags)
1358{
1359	/* should never happen if there's no MMU */
1360	BUG();
1361	return VM_FAULT_SIGBUS;
1362}
1363static inline int fixup_user_fault(struct task_struct *tsk,
1364		struct mm_struct *mm, unsigned long address,
1365		unsigned int fault_flags, bool *unlocked)
1366{
1367	/* should never happen if there's no MMU */
1368	BUG();
1369	return -EFAULT;
1370}
1371static inline void unmap_mapping_pages(struct address_space *mapping,
1372		pgoff_t start, pgoff_t nr, bool even_cows) { }
1373static inline void unmap_mapping_range(struct address_space *mapping,
1374		loff_t const holebegin, loff_t const holelen, int even_cows) { }
1375#endif
1376
1377static inline void unmap_shared_mapping_range(struct address_space *mapping,
1378		loff_t const holebegin, loff_t const holelen)
1379{
1380	unmap_mapping_range(mapping, holebegin, holelen, 0);
1381}
1382
1383extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
1384		void *buf, int len, unsigned int gup_flags);
1385extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1386		void *buf, int len, unsigned int gup_flags);
1387extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
1388		unsigned long addr, void *buf, int len, unsigned int gup_flags);
1389
1390long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
1391			    unsigned long start, unsigned long nr_pages,
1392			    unsigned int gup_flags, struct page **pages,
1393			    struct vm_area_struct **vmas, int *locked);
1394long get_user_pages(unsigned long start, unsigned long nr_pages,
1395			    unsigned int gup_flags, struct page **pages,
1396			    struct vm_area_struct **vmas);
1397long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
1398		    unsigned int gup_flags, struct page **pages, int *locked);
1399long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
1400		    struct page **pages, unsigned int gup_flags);
1401#ifdef CONFIG_FS_DAX
1402long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
1403			    unsigned int gup_flags, struct page **pages,
1404			    struct vm_area_struct **vmas);
1405#else
1406static inline long get_user_pages_longterm(unsigned long start,
1407		unsigned long nr_pages, unsigned int gup_flags,
1408		struct page **pages, struct vm_area_struct **vmas)
1409{
1410	return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
1411}
1412#endif /* CONFIG_FS_DAX */
1413
1414int get_user_pages_fast(unsigned long start, int nr_pages, int write,
1415			struct page **pages);
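
/*
 * Illustrative sketch: pinning a small user buffer for short-term kernel
 * access with the fast path above (error handling elided; pinned pages must
 * be released with put_page()):
 *
 *	struct page *pages[16];
 *	int i, n;
 *
 *	n = get_user_pages_fast(user_addr, 16, 1, pages);
 *	...
 *	for (i = 0; i < n; i++)
 *		put_page(pages[i]);
 */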
1416
1417/* Container for pinned pfns / pages */
1418struct frame_vector {
1419	unsigned int nr_allocated;	/* Number of frames we have space for */
1420	unsigned int nr_frames;	/* Number of frames stored in ptrs array */
1421	bool got_ref;		/* Did we pin pages by getting page ref? */
1422	bool is_pfns;		/* Does array contain pages or pfns? */
1423	void *ptrs[0];		/* Array of pinned pfns / pages. Use
1424				 * frame_vector_pages() or frame_vector_pfns()
1425				 * for access */
1426};
1427
1428struct frame_vector *frame_vector_create(unsigned int nr_frames);
1429void frame_vector_destroy(struct frame_vector *vec);
1430int get_vaddr_frames(unsigned long start, unsigned int nr_pfns,
1431		     unsigned int gup_flags, struct frame_vector *vec);
1432void put_vaddr_frames(struct frame_vector *vec);
1433int frame_vector_to_pages(struct frame_vector *vec);
1434void frame_vector_to_pfns(struct frame_vector *vec);
1435
1436static inline unsigned int frame_vector_count(struct frame_vector *vec)
1437{
1438	return vec->nr_frames;
1439}
1440
1441static inline struct page **frame_vector_pages(struct frame_vector *vec)
1442{
1443	if (vec->is_pfns) {
1444		int err = frame_vector_to_pages(vec);
1445
1446		if (err)
1447			return ERR_PTR(err);
1448	}
1449	return (struct page **)(vec->ptrs);
1450}
1451
1452static inline unsigned long *frame_vector_pfns(struct frame_vector *vec)
1453{
1454	if (!vec->is_pfns)
1455		frame_vector_to_pfns(vec);
1456	return (unsigned long *)(vec->ptrs);
1457}
1458
1459struct kvec;
1460int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
1461			struct page **pages);
1462int get_kernel_page(unsigned long start, int write, struct page **pages);
1463struct page *get_dump_page(unsigned long addr);
1464
1465extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
1466extern void do_invalidatepage(struct page *page, unsigned int offset,
1467			      unsigned int length);
1468
1469void __set_page_dirty(struct page *, struct address_space *, int warn);
1470int __set_page_dirty_nobuffers(struct page *page);
1471int __set_page_dirty_no_writeback(struct page *page);
1472int redirty_page_for_writepage(struct writeback_control *wbc,
1473				struct page *page);
1474void account_page_dirtied(struct page *page, struct address_space *mapping);
1475void account_page_cleaned(struct page *page, struct address_space *mapping,
1476			  struct bdi_writeback *wb);
1477int set_page_dirty(struct page *page);
1478int set_page_dirty_lock(struct page *page);
1479void __cancel_dirty_page(struct page *page);
1480static inline void cancel_dirty_page(struct page *page)
1481{
1482	/* Avoid atomic ops, locking, etc. when not actually needed. */
1483	if (PageDirty(page))
1484		__cancel_dirty_page(page);
1485}
1486int clear_page_dirty_for_io(struct page *page);
1487
1488int get_cmdline(struct task_struct *task, char *buffer, int buflen);
1489
1490static inline bool vma_is_anonymous(struct vm_area_struct *vma)
1491{
1492	return !vma->vm_ops;
1493}
1494
1495#ifdef CONFIG_SHMEM
1496/*
1497 * vma_is_shmem() is not inline because it is used only by slow
1498 * paths in userfault handling.
1499 */
1500bool vma_is_shmem(struct vm_area_struct *vma);
1501#else
1502static inline bool vma_is_shmem(struct vm_area_struct *vma) { return false; }
1503#endif
1504
1505int vma_is_stack_for_current(struct vm_area_struct *vma);
 
1506
1507extern unsigned long move_page_tables(struct vm_area_struct *vma,
1508		unsigned long old_addr, struct vm_area_struct *new_vma,
1509		unsigned long new_addr, unsigned long len,
1510		bool need_rmap_locks);
1511extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
1512			      unsigned long end, pgprot_t newprot,
1513			      int dirty_accountable, int prot_numa);
1514extern int mprotect_fixup(struct vm_area_struct *vma,
1515			  struct vm_area_struct **pprev, unsigned long start,
1516			  unsigned long end, unsigned long newflags);
1517
1518/*
1519 * Doesn't attempt to fault pages in and may return a short count.
1520 */
1521int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
1522			  struct page **pages);
1523/*
1524 * per-process(per-mm_struct) statistics.
1525 */
1526static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
1527{
1528	long val = atomic_long_read(&mm->rss_stat.count[member]);
1529
1530#ifdef SPLIT_RSS_COUNTING
1531	/*
1532	 * The counter is updated asynchronously and may temporarily go negative,
1533	 * which is never what callers expect, so clamp it to zero.
1534	 */
1535	if (val < 0)
1536		val = 0;
1537#endif
1538	return (unsigned long)val;
1539}
1540
1541static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
1542{
1543	atomic_long_add(value, &mm->rss_stat.count[member]);
1544}
1545
1546static inline void inc_mm_counter(struct mm_struct *mm, int member)
1547{
1548	atomic_long_inc(&mm->rss_stat.count[member]);
1549}
1550
1551static inline void dec_mm_counter(struct mm_struct *mm, int member)
1552{
1553	atomic_long_dec(&mm->rss_stat.count[member]);
1554}
1555
1556/* Optimized variant when page is already known not to be PageAnon */
1557static inline int mm_counter_file(struct page *page)
1558{
1559	if (PageSwapBacked(page))
1560		return MM_SHMEMPAGES;
1561	return MM_FILEPAGES;
1562}
1563
1564static inline int mm_counter(struct page *page)
1565{
1566	if (PageAnon(page))
1567		return MM_ANONPAGES;
1568	return mm_counter_file(page);
1569}
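
/*
 * Illustrative sketch: how the RSS counters above are typically driven when
 * a page is mapped into or unmapped from an address space:
 *
 *	inc_mm_counter(mm, mm_counter(page));	// page becomes mapped
 *	...
 *	dec_mm_counter(mm, mm_counter(page));	// page gets unmapped
 */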
1570
1571static inline unsigned long get_mm_rss(struct mm_struct *mm)
1572{
1573	return get_mm_counter(mm, MM_FILEPAGES) +
1574		get_mm_counter(mm, MM_ANONPAGES) +
1575		get_mm_counter(mm, MM_SHMEMPAGES);
1576}
1577
1578static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
1579{
1580	return max(mm->hiwater_rss, get_mm_rss(mm));
1581}
1582
1583static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
1584{
1585	return max(mm->hiwater_vm, mm->total_vm);
1586}
1587
1588static inline void update_hiwater_rss(struct mm_struct *mm)
1589{
1590	unsigned long _rss = get_mm_rss(mm);
1591
1592	if ((mm)->hiwater_rss < _rss)
1593		(mm)->hiwater_rss = _rss;
1594}
1595
1596static inline void update_hiwater_vm(struct mm_struct *mm)
1597{
1598	if (mm->hiwater_vm < mm->total_vm)
1599		mm->hiwater_vm = mm->total_vm;
1600}
1601
1602static inline void reset_mm_hiwater_rss(struct mm_struct *mm)
1603{
1604	mm->hiwater_rss = get_mm_rss(mm);
1605}
1606
1607static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
1608					 struct mm_struct *mm)
1609{
1610	unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
1611
1612	if (*maxrss < hiwater_rss)
1613		*maxrss = hiwater_rss;
1614}
1615
1616#if defined(SPLIT_RSS_COUNTING)
1617void sync_mm_rss(struct mm_struct *mm);
1618#else
1619static inline void sync_mm_rss(struct mm_struct *mm)
1620{
1621}
1622#endif
1623
1624#ifndef __HAVE_ARCH_PTE_DEVMAP
1625static inline int pte_devmap(pte_t pte)
1626{
1627	return 0;
1628}
1629#endif
1630
1631int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);
1632
1633extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
1634			       spinlock_t **ptl);
1635static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
1636				    spinlock_t **ptl)
1637{
1638	pte_t *ptep;
1639	__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
1640	return ptep;
1641}
1642
1643#ifdef __PAGETABLE_P4D_FOLDED
1644static inline int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1645						unsigned long address)
1646{
1647	return 0;
1648}
1649#else
1650int __p4d_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
1651#endif
1652
1653#if defined(__PAGETABLE_PUD_FOLDED) || !defined(CONFIG_MMU)
1654static inline int __pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1655						unsigned long address)
1656{
1657	return 0;
1658}
1659static inline void mm_inc_nr_puds(struct mm_struct *mm) {}
1660static inline void mm_dec_nr_puds(struct mm_struct *mm) {}
1661
1662#else
1663int __pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address);
1664
1665static inline void mm_inc_nr_puds(struct mm_struct *mm)
1666{
1667	atomic_long_add(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1668}
1669
1670static inline void mm_dec_nr_puds(struct mm_struct *mm)
1671{
1672	atomic_long_sub(PTRS_PER_PUD * sizeof(pud_t), &mm->pgtables_bytes);
1673}
1674#endif
1675
1676#if defined(__PAGETABLE_PMD_FOLDED) || !defined(CONFIG_MMU)
1677static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
1678						unsigned long address)
1679{
1680	return 0;
1681}
1682
1683static inline void mm_inc_nr_pmds(struct mm_struct *mm) {}
1684static inline void mm_dec_nr_pmds(struct mm_struct *mm) {}
1685
1686#else
1687int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
1688
1689static inline void mm_inc_nr_pmds(struct mm_struct *mm)
1690{
1691	atomic_long_add(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1692}
1693
1694static inline void mm_dec_nr_pmds(struct mm_struct *mm)
1695{
1696	atomic_long_sub(PTRS_PER_PMD * sizeof(pmd_t), &mm->pgtables_bytes);
1697}
1698#endif
1699
1700#ifdef CONFIG_MMU
1701static inline void mm_pgtables_bytes_init(struct mm_struct *mm)
1702{
1703	atomic_long_set(&mm->pgtables_bytes, 0);
1704}
1705
1706static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
1707{
1708	return atomic_long_read(&mm->pgtables_bytes);
1709}
1710
1711static inline void mm_inc_nr_ptes(struct mm_struct *mm)
1712{
1713	atomic_long_add(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
1714}
1715
1716static inline void mm_dec_nr_ptes(struct mm_struct *mm)
1717{
1718	atomic_long_sub(PTRS_PER_PTE * sizeof(pte_t), &mm->pgtables_bytes);
1719}
1720#else
1721
1722static inline void mm_pgtables_bytes_init(struct mm_struct *mm) {}
1723static inline unsigned long mm_pgtables_bytes(const struct mm_struct *mm)
1724{
1725	return 0;
1726}
1727
1728static inline void mm_inc_nr_ptes(struct mm_struct *mm) {}
1729static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
1730#endif
1731
1732int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
1733int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
1734
1735/*
1736 * The following ifdef is needed to get the 4level-fixup.h header to work.
1737 * Remove it when 4level-fixup.h has been removed.
1738 */
1739#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
1740
1741#ifndef __ARCH_HAS_5LEVEL_HACK
1742static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
1743		unsigned long address)
1744{
1745	return (unlikely(pgd_none(*pgd)) && __p4d_alloc(mm, pgd, address)) ?
1746		NULL : p4d_offset(pgd, address);
1747}
1748
1749static inline pud_t *pud_alloc(struct mm_struct *mm, p4d_t *p4d,
1750		unsigned long address)
1751{
1752	return (unlikely(p4d_none(*p4d)) && __pud_alloc(mm, p4d, address)) ?
1753		NULL : pud_offset(p4d, address);
1754}
1755#endif /* !__ARCH_HAS_5LEVEL_HACK */
1756
1757static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
1758{
1759	return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
1760		NULL: pmd_offset(pud, address);
1761}
1762#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
1763
1764#if USE_SPLIT_PTE_PTLOCKS
1765#if ALLOC_SPLIT_PTLOCKS
1766void __init ptlock_cache_init(void);
1767extern bool ptlock_alloc(struct page *page);
1768extern void ptlock_free(struct page *page);
1769
1770static inline spinlock_t *ptlock_ptr(struct page *page)
1771{
1772	return page->ptl;
1773}
1774#else /* ALLOC_SPLIT_PTLOCKS */
1775static inline void ptlock_cache_init(void)
1776{
1777}
1778
1779static inline bool ptlock_alloc(struct page *page)
1780{
1781	return true;
1782}
1783
1784static inline void ptlock_free(struct page *page)
1785{
1786}
1787
1788static inline spinlock_t *ptlock_ptr(struct page *page)
1789{
1790	return &page->ptl;
1791}
1792#endif /* ALLOC_SPLIT_PTLOCKS */
1793
1794static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1795{
1796	return ptlock_ptr(pmd_page(*pmd));
1797}
1798
1799static inline bool ptlock_init(struct page *page)
1800{
1801	/*
1802	 * prep_new_page() initializes page->private (and therefore page->ptl)
1803	 * with 0. Make sure nobody has taken it into use in the meantime.
1804	 *
1805	 * This can happen if an arch tries to use slab for page table allocation:
1806	 * slab code uses page->slab_cache, which shares storage with page->ptl.
1807	 */
1808	VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
1809	if (!ptlock_alloc(page))
1810		return false;
1811	spin_lock_init(ptlock_ptr(page));
1812	return true;
1813}
1814
1815/* Reset page->mapping so free_pages_check won't complain. */
1816static inline void pte_lock_deinit(struct page *page)
1817{
1818	page->mapping = NULL;
1819	ptlock_free(page);
1820}
1821
1822#else	/* !USE_SPLIT_PTE_PTLOCKS */
1823/*
1824 * We use mm->page_table_lock to guard all pagetable pages of the mm.
1825 */
1826static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
1827{
1828	return &mm->page_table_lock;
1829}
1830static inline void ptlock_cache_init(void) {}
1831static inline bool ptlock_init(struct page *page) { return true; }
1832static inline void pte_lock_deinit(struct page *page) {}
1833#endif /* USE_SPLIT_PTE_PTLOCKS */
1834
1835static inline void pgtable_init(void)
1836{
1837	ptlock_cache_init();
1838	pgtable_cache_init();
1839}
1840
1841static inline bool pgtable_page_ctor(struct page *page)
1842{
1843	if (!ptlock_init(page))
1844		return false;
1845	inc_zone_page_state(page, NR_PAGETABLE);
1846	return true;
1847}
1848
1849static inline void pgtable_page_dtor(struct page *page)
1850{
1851	pte_lock_deinit(page);
1852	dec_zone_page_state(page, NR_PAGETABLE);
1853}
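
/*
 * Illustrative sketch of how an architecture's pte_alloc_one() typically
 * uses the constructor/destructor above (details vary per architecture):
 *
 *	struct page *pte = alloc_page(GFP_KERNEL | __GFP_ZERO);
 *
 *	if (!pte)
 *		return NULL;
 *	if (!pgtable_page_ctor(pte)) {
 *		__free_page(pte);
 *		return NULL;
 *	}
 *	return pte;
 */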
1854
1855#define pte_offset_map_lock(mm, pmd, address, ptlp)	\
1856({							\
1857	spinlock_t *__ptl = pte_lockptr(mm, pmd);	\
1858	pte_t *__pte = pte_offset_map(pmd, address);	\
1859	*(ptlp) = __ptl;				\
1860	spin_lock(__ptl);				\
1861	__pte;						\
1862})
1863
1864#define pte_unmap_unlock(pte, ptl)	do {		\
1865	spin_unlock(ptl);				\
1866	pte_unmap(pte);					\
1867} while (0)
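
/*
 * Illustrative sketch of the usual pattern built from the two macros above
 * (assumes pmd already points at a mapped PTE page):
 *
 *	spinlock_t *ptl;
 *	pte_t *pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *
 *	if (pte_present(*pte))
 *		...;
 *	pte_unmap_unlock(pte, ptl);
 */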
1868
1869#define pte_alloc(mm, pmd, address)			\
1870	(unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, pmd, address))
1871
1872#define pte_alloc_map(mm, pmd, address)			\
1873	(pte_alloc(mm, pmd, address) ? NULL : pte_offset_map(pmd, address))
1874
1875#define pte_alloc_map_lock(mm, pmd, address, ptlp)	\
1876	(pte_alloc(mm, pmd, address) ?			\
1877		 NULL : pte_offset_map_lock(mm, pmd, address, ptlp))
1878
1879#define pte_alloc_kernel(pmd, address)			\
1880	((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
1881		NULL: pte_offset_kernel(pmd, address))
1882
1883#if USE_SPLIT_PMD_PTLOCKS
1884
1885static struct page *pmd_to_page(pmd_t *pmd)
1886{
1887	unsigned long mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
1888	return virt_to_page((void *)((unsigned long) pmd & mask));
1889}
1890
1891static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1892{
1893	return ptlock_ptr(pmd_to_page(pmd));
1894}
1895
1896static inline bool pgtable_pmd_page_ctor(struct page *page)
1897{
1898#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1899	page->pmd_huge_pte = NULL;
1900#endif
1901	return ptlock_init(page);
1902}
1903
1904static inline void pgtable_pmd_page_dtor(struct page *page)
1905{
1906#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1907	VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
1908#endif
1909	ptlock_free(page);
1910}
1911
1912#define pmd_huge_pte(mm, pmd) (pmd_to_page(pmd)->pmd_huge_pte)
1913
1914#else
1915
1916static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
1917{
1918	return &mm->page_table_lock;
1919}
1920
1921static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
1922static inline void pgtable_pmd_page_dtor(struct page *page) {}
1923
1924#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
1925
1926#endif
1927
1928static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
1929{
1930	spinlock_t *ptl = pmd_lockptr(mm, pmd);
1931	spin_lock(ptl);
1932	return ptl;
1933}
1934
1935/*
1936 * No scalability reason to split PUD locks yet, but follow the same pattern
1937 * as the PMD locks to make it easier if we decide to.  The VM should not be
1938 * considered ready to switch to split PUD locks yet; there may be places
1939 * which need to be converted from page_table_lock.
1940 */
1941static inline spinlock_t *pud_lockptr(struct mm_struct *mm, pud_t *pud)
1942{
1943	return &mm->page_table_lock;
1944}
1945
1946static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
1947{
1948	spinlock_t *ptl = pud_lockptr(mm, pud);
1949
1950	spin_lock(ptl);
1951	return ptl;
1952}
1953
1954extern void __init pagecache_init(void);
1955extern void free_area_init(unsigned long * zones_size);
1956extern void free_area_init_node(int nid, unsigned long * zones_size,
1957		unsigned long zone_start_pfn, unsigned long *zholes_size);
1958extern void free_initmem(void);
1959
1960/*
1961 * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
1962 * into the buddy system. The freed pages will be poisoned with the pattern
1963 * "poison" if that value is within the range [0, UCHAR_MAX].
1964 * Returns the number of pages freed into the buddy system.
1965 */
1966extern unsigned long free_reserved_area(void *start, void *end,
1967					int poison, char *s);
1968
1969#ifdef	CONFIG_HIGHMEM
1970/*
1971 * Free a highmem page into the buddy system, adjusting totalhigh_pages
1972 * and totalram_pages.
1973 */
1974extern void free_highmem_page(struct page *page);
1975#endif
1976
1977extern void adjust_managed_page_count(struct page *page, long count);
1978extern void mem_init_print_info(const char *str);
1979
1980extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end);
1981
1982/* Free the reserved page into the buddy system, so it gets managed. */
1983static inline void __free_reserved_page(struct page *page)
1984{
1985	ClearPageReserved(page);
1986	init_page_count(page);
1987	__free_page(page);
1988}
1989
1990static inline void free_reserved_page(struct page *page)
1991{
1992	__free_reserved_page(page);
1993	adjust_managed_page_count(page, 1);
1994}
1995
1996static inline void mark_page_reserved(struct page *page)
1997{
1998	SetPageReserved(page);
1999	adjust_managed_page_count(page, -1);
2000}
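
/*
 * Illustrative sketch: returning a page reserved at boot time to the buddy
 * allocator so it is accounted as managed memory again (pfn is assumed to
 * refer to a page that really was reserved):
 *
 *	free_reserved_page(pfn_to_page(pfn));
 */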
2001
2002/*
2003 * Default method to free all the __init memory into the buddy system.
2004 * The freed pages will be poisoned with the pattern "poison" if that value
2005 * is within the range [0, UCHAR_MAX].
2006 * Returns the number of pages freed into the buddy system.
2007 */
2008static inline unsigned long free_initmem_default(int poison)
2009{
2010	extern char __init_begin[], __init_end[];
2011
2012	return free_reserved_area(&__init_begin, &__init_end,
2013				  poison, "unused kernel");
2014}
2015
2016static inline unsigned long get_num_physpages(void)
2017{
2018	int nid;
2019	unsigned long phys_pages = 0;
2020
2021	for_each_online_node(nid)
2022		phys_pages += node_present_pages(nid);
2023
2024	return phys_pages;
2025}
2026
2027#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
2028/*
2029 * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
2030 * zones, allocate the backing mem_map and account for memory holes in a more
2031 * architecture independent manner. This is a substitute for creating the
2032 * zone_sizes[] and zholes_size[] arrays and passing them to
2033 * free_area_init_node()
2034 *
2035 * An architecture is expected to register range of page frames backed by
2036 * physical memory with memblock_add[_node]() before calling
2037 * free_area_init_nodes(), passing in the PFN each zone ends at. For basic
2038 * usage, an architecture is expected to do something like:
2039 *
2040 * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
2041 * 							 max_highmem_pfn};
2042 * for_each_valid_physical_page_range()
2043 * 	memblock_add_node(base, size, nid)
2044 * free_area_init_nodes(max_zone_pfns);
2045 *
2046 * free_bootmem_with_active_regions() calls free_bootmem_node() for each
2047 * registered physical page range.  Similarly
2048 * sparse_memory_present_with_active_regions() calls memory_present() for
2049 * each range when SPARSEMEM is enabled.
2050 *
2051 * See mm/page_alloc.c for more information on each function exposed by
2052 * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
2053 */
2054extern void free_area_init_nodes(unsigned long *max_zone_pfn);
2055unsigned long node_map_pfn_alignment(void);
2056unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
2057						unsigned long end_pfn);
2058extern unsigned long absent_pages_in_range(unsigned long start_pfn,
2059						unsigned long end_pfn);
2060extern void get_pfn_range_for_nid(unsigned int nid,
2061			unsigned long *start_pfn, unsigned long *end_pfn);
2062extern unsigned long find_min_pfn_with_active_regions(void);
2063extern void free_bootmem_with_active_regions(int nid,
2064						unsigned long max_low_pfn);
2065extern void sparse_memory_present_with_active_regions(int nid);
2066
2067#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
2068
2069#if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
2070    !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
2071static inline int __early_pfn_to_nid(unsigned long pfn,
2072					struct mminit_pfnnid_cache *state)
2073{
2074	return 0;
2075}
2076#else
2077/* please see mm/page_alloc.c */
2078extern int __meminit early_pfn_to_nid(unsigned long pfn);
2079/* there is a per-arch backend function. */
2080extern int __meminit __early_pfn_to_nid(unsigned long pfn,
2081					struct mminit_pfnnid_cache *state);
2082#endif
2083
2084#ifdef CONFIG_HAVE_MEMBLOCK
2085void zero_resv_unavail(void);
2086#else
2087static inline void zero_resv_unavail(void) {}
2088#endif
2089
2090extern void set_dma_reserve(unsigned long new_dma_reserve);
2091extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
2092		enum memmap_context, struct vmem_altmap *);
2093extern void setup_per_zone_wmarks(void);
2094extern int __meminit init_per_zone_wmark_min(void);
2095extern void mem_init(void);
2096extern void __init mmap_init(void);
2097extern void show_mem(unsigned int flags, nodemask_t *nodemask);
2098extern long si_mem_available(void);
2099extern void si_meminfo(struct sysinfo * val);
2100extern void si_meminfo_node(struct sysinfo *val, int nid);
2101#ifdef __HAVE_ARCH_RESERVED_KERNEL_PAGES
2102extern unsigned long arch_reserved_kernel_pages(void);
2103#endif
2104
2105extern __printf(3, 4)
2106void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
2107
2108extern void setup_per_cpu_pageset(void);
2109
2110extern void zone_pcp_update(struct zone *zone);
2111extern void zone_pcp_reset(struct zone *zone);
2112
2113/* page_alloc.c */
2114extern int min_free_kbytes;
2115extern int watermark_scale_factor;
2116
2117/* nommu.c */
2118extern atomic_long_t mmap_pages_allocated;
2119extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
2120
2121/* interval_tree.c */
2122void vma_interval_tree_insert(struct vm_area_struct *node,
2123			      struct rb_root_cached *root);
2124void vma_interval_tree_insert_after(struct vm_area_struct *node,
2125				    struct vm_area_struct *prev,
2126				    struct rb_root_cached *root);
2127void vma_interval_tree_remove(struct vm_area_struct *node,
2128			      struct rb_root_cached *root);
2129struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root_cached *root,
2130				unsigned long start, unsigned long last);
2131struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
2132				unsigned long start, unsigned long last);
2133
2134#define vma_interval_tree_foreach(vma, root, start, last)		\
2135	for (vma = vma_interval_tree_iter_first(root, start, last);	\
2136	     vma; vma = vma_interval_tree_iter_next(vma, start, last))
2137
2138void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
2139				   struct rb_root_cached *root);
2140void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
2141				   struct rb_root_cached *root);
2142struct anon_vma_chain *
2143anon_vma_interval_tree_iter_first(struct rb_root_cached *root,
2144				  unsigned long start, unsigned long last);
2145struct anon_vma_chain *anon_vma_interval_tree_iter_next(
2146	struct anon_vma_chain *node, unsigned long start, unsigned long last);
2147#ifdef CONFIG_DEBUG_VM_RB
2148void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
2149#endif
2150
2151#define anon_vma_interval_tree_foreach(avc, root, start, last)		 \
2152	for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
2153	     avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
2154
2155/* mmap.c */
2156extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
2157extern int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
2158	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
2159	struct vm_area_struct *expand);
2160static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
2161	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
2162{
2163	return __vma_adjust(vma, start, end, pgoff, insert, NULL);
2164}
2165extern struct vm_area_struct *vma_merge(struct mm_struct *,
2166	struct vm_area_struct *prev, unsigned long addr, unsigned long end,
2167	unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
2168	struct mempolicy *, struct vm_userfaultfd_ctx);
2169extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
2170extern int __split_vma(struct mm_struct *, struct vm_area_struct *,
2171	unsigned long addr, int new_below);
2172extern int split_vma(struct mm_struct *, struct vm_area_struct *,
2173	unsigned long addr, int new_below);
2174extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
2175extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
2176	struct rb_node **, struct rb_node *);
2177extern void unlink_file_vma(struct vm_area_struct *);
2178extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
2179	unsigned long addr, unsigned long len, pgoff_t pgoff,
2180	bool *need_rmap_locks);
2181extern void exit_mmap(struct mm_struct *);
2182
2183static inline int check_data_rlimit(unsigned long rlim,
2184				    unsigned long new,
2185				    unsigned long start,
2186				    unsigned long end_data,
2187				    unsigned long start_data)
2188{
2189	if (rlim < RLIM_INFINITY) {
2190		if (((new - start) + (end_data - start_data)) > rlim)
2191			return -ENOSPC;
2192	}
2193
2194	return 0;
2195}
2196
2197extern int mm_take_all_locks(struct mm_struct *mm);
2198extern void mm_drop_all_locks(struct mm_struct *mm);
2199
2200extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
2201extern struct file *get_mm_exe_file(struct mm_struct *mm);
2202extern struct file *get_task_exe_file(struct task_struct *task);
2203
2204extern bool may_expand_vm(struct mm_struct *, vm_flags_t, unsigned long npages);
2205extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
2206
2207extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
2208				   const struct vm_special_mapping *sm);
2209extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
2210				   unsigned long addr, unsigned long len,
2211				   unsigned long flags,
2212				   const struct vm_special_mapping *spec);
2213/* This is an obsolete alternative to _install_special_mapping. */
2214extern int install_special_mapping(struct mm_struct *mm,
2215				   unsigned long addr, unsigned long len,
2216				   unsigned long flags, struct page **pages);
2217
2218extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
2219
2220extern unsigned long mmap_region(struct file *file, unsigned long addr,
2221	unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
2222	struct list_head *uf);
2223extern unsigned long do_mmap(struct file *file, unsigned long addr,
2224	unsigned long len, unsigned long prot, unsigned long flags,
2225	vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
2226	struct list_head *uf);
2227extern int do_munmap(struct mm_struct *, unsigned long, size_t,
2228		     struct list_head *uf);
2229
2230static inline unsigned long
2231do_mmap_pgoff(struct file *file, unsigned long addr,
2232	unsigned long len, unsigned long prot, unsigned long flags,
2233	unsigned long pgoff, unsigned long *populate,
2234	struct list_head *uf)
2235{
2236	return do_mmap(file, addr, len, prot, flags, 0, pgoff, populate, uf);
2237}
2238
2239#ifdef CONFIG_MMU
2240extern int __mm_populate(unsigned long addr, unsigned long len,
2241			 int ignore_errors);
2242static inline void mm_populate(unsigned long addr, unsigned long len)
2243{
2244	/* Ignore errors */
2245	(void) __mm_populate(addr, len, 1);
2246}
2247#else
2248static inline void mm_populate(unsigned long addr, unsigned long len) {}
2249#endif
2250
2251/* These take the mm semaphore themselves */
2252extern int __must_check vm_brk(unsigned long, unsigned long);
2253extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
2254extern int vm_munmap(unsigned long, size_t);
2255extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
2256        unsigned long, unsigned long,
2257        unsigned long, unsigned long);
2258
2259struct vm_unmapped_area_info {
2260#define VM_UNMAPPED_AREA_TOPDOWN 1
2261	unsigned long flags;
2262	unsigned long length;
2263	unsigned long low_limit;
2264	unsigned long high_limit;
2265	unsigned long align_mask;
2266	unsigned long align_offset;
2267};
2268
2269extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
2270extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
2271
2272/*
2273 * Search for an unmapped address range.
2274 *
2275 * We are looking for a range that:
2276 * - does not intersect with any VMA;
2277 * - is contained within the [low_limit, high_limit) interval;
2278 * - is at least the desired size;
2279 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
2280 */
2281static inline unsigned long
2282vm_unmapped_area(struct vm_unmapped_area_info *info)
2283{
2284	if (info->flags & VM_UNMAPPED_AREA_TOPDOWN)
2285		return unmapped_area_topdown(info);
2286	else
2287		return unmapped_area(info);
2288}
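
/*
 * Illustrative sketch, loosely modelled on what an architecture's
 * arch_get_unmapped_area() does with the structure above (field values are
 * examples only):
 *
 *	struct vm_unmapped_area_info info = {
 *		.flags		= 0,			// bottom-up search
 *		.length		= len,
 *		.low_limit	= mm->mmap_base,
 *		.high_limit	= TASK_SIZE,
 *		.align_mask	= 0,
 *		.align_offset	= 0,
 *	};
 *
 *	addr = vm_unmapped_area(&info);
 */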
2289
2290/* truncate.c */
2291extern void truncate_inode_pages(struct address_space *, loff_t);
2292extern void truncate_inode_pages_range(struct address_space *,
2293				       loff_t lstart, loff_t lend);
2294extern void truncate_inode_pages_final(struct address_space *);
2295
2296/* generic vm_area_ops exported for stackable file systems */
2297extern int filemap_fault(struct vm_fault *vmf);
2298extern void filemap_map_pages(struct vm_fault *vmf,
2299		pgoff_t start_pgoff, pgoff_t end_pgoff);
2300extern int filemap_page_mkwrite(struct vm_fault *vmf);
2301
2302/* mm/page-writeback.c */
2303int __must_check write_one_page(struct page *page);
2304void task_dirty_inc(struct task_struct *tsk);
2305
2306/* readahead.c */
2307#define VM_MAX_READAHEAD	128	/* kbytes */
2308#define VM_MIN_READAHEAD	16	/* kbytes (includes current page) */
2309
2310int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
2311			pgoff_t offset, unsigned long nr_to_read);
2312
2313void page_cache_sync_readahead(struct address_space *mapping,
2314			       struct file_ra_state *ra,
2315			       struct file *filp,
2316			       pgoff_t offset,
2317			       unsigned long size);
2318
2319void page_cache_async_readahead(struct address_space *mapping,
2320				struct file_ra_state *ra,
2321				struct file *filp,
2322				struct page *pg,
2323				pgoff_t offset,
2324				unsigned long size);
2325
2326extern unsigned long stack_guard_gap;
 
2327/* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
2328extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
2329
2330/* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
2331extern int expand_downwards(struct vm_area_struct *vma,
2332		unsigned long address);
2333#if VM_GROWSUP
2334extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
2335#else
2336  #define expand_upwards(vma, address) (0)
2337#endif
2338
2339/* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2340extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
2341extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
2342					     struct vm_area_struct **pprev);
2343
2344/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
2345   NULL if none.  Assume start_addr < end_addr. */
2346static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
2347{
2348	struct vm_area_struct * vma = find_vma(mm,start_addr);
2349
2350	if (vma && end_addr <= vma->vm_start)
2351		vma = NULL;
2352	return vma;
2353}
2354
2355static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
2356{
2357	unsigned long vm_start = vma->vm_start;
2358
2359	if (vma->vm_flags & VM_GROWSDOWN) {
2360		vm_start -= stack_guard_gap;
2361		if (vm_start > vma->vm_start)
2362			vm_start = 0;
2363	}
2364	return vm_start;
2365}
2366
2367static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
2368{
2369	unsigned long vm_end = vma->vm_end;
2370
2371	if (vma->vm_flags & VM_GROWSUP) {
2372		vm_end += stack_guard_gap;
2373		if (vm_end < vma->vm_end)
2374			vm_end = -PAGE_SIZE;
2375	}
2376	return vm_end;
2377}
2378
2379static inline unsigned long vma_pages(struct vm_area_struct *vma)
2380{
2381	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
2382}
2383
2384/* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
2385static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
2386				unsigned long vm_start, unsigned long vm_end)
2387{
2388	struct vm_area_struct *vma = find_vma(mm, vm_start);
2389
2390	if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
2391		vma = NULL;
2392
2393	return vma;
2394}
2395
2396#ifdef CONFIG_MMU
2397pgprot_t vm_get_page_prot(unsigned long vm_flags);
2398void vma_set_page_prot(struct vm_area_struct *vma);
2399#else
2400static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
2401{
2402	return __pgprot(0);
2403}
2404static inline void vma_set_page_prot(struct vm_area_struct *vma)
2405{
2406	vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2407}
2408#endif
2409
2410#ifdef CONFIG_NUMA_BALANCING
2411unsigned long change_prot_numa(struct vm_area_struct *vma,
2412			unsigned long start, unsigned long end);
2413#endif
2414
2415struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
2416int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
2417			unsigned long pfn, unsigned long size, pgprot_t);
2418int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
2419int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
2420			unsigned long pfn);
2421int vm_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
2422			unsigned long pfn, pgprot_t pgprot);
2423int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
2424			pfn_t pfn);
2425int vm_insert_mixed_mkwrite(struct vm_area_struct *vma, unsigned long addr,
2426			pfn_t pfn);
2427int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
2428
2429static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
2430				unsigned long addr, struct page *page)
2431{
2432	int err = vm_insert_page(vma, addr, page);
2433
2434	if (err == -ENOMEM)
2435		return VM_FAULT_OOM;
2436	if (err < 0 && err != -EBUSY)
2437		return VM_FAULT_SIGBUS;
2438
2439	return VM_FAULT_NOPAGE;
2440}
2441
2442static inline vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma,
2443				unsigned long addr, pfn_t pfn)
2444{
2445	int err = vm_insert_mixed(vma, addr, pfn);
2446
2447	if (err == -ENOMEM)
2448		return VM_FAULT_OOM;
2449	if (err < 0 && err != -EBUSY)
2450		return VM_FAULT_SIGBUS;
2451
2452	return VM_FAULT_NOPAGE;
2453}
2454
2455static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
2456			unsigned long addr, unsigned long pfn)
2457{
2458	int err = vm_insert_pfn(vma, addr, pfn);
2459
2460	if (err == -ENOMEM)
2461		return VM_FAULT_OOM;
2462	if (err < 0 && err != -EBUSY)
2463		return VM_FAULT_SIGBUS;
2464
2465	return VM_FAULT_NOPAGE;
2466}
2467
2468static inline vm_fault_t vmf_error(int err)
2469{
2470	if (err == -ENOMEM)
2471		return VM_FAULT_OOM;
2472	return VM_FAULT_SIGBUS;
2473}
2474
2475struct page *follow_page_mask(struct vm_area_struct *vma,
2476			      unsigned long address, unsigned int foll_flags,
2477			      unsigned int *page_mask);
2478
2479static inline struct page *follow_page(struct vm_area_struct *vma,
2480		unsigned long address, unsigned int foll_flags)
2481{
2482	unsigned int unused_page_mask;
2483	return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
2484}
2485
2486#define FOLL_WRITE	0x01	/* check pte is writable */
2487#define FOLL_TOUCH	0x02	/* mark page accessed */
2488#define FOLL_GET	0x04	/* do get_page on page */
2489#define FOLL_DUMP	0x08	/* give error on hole if it would be zero */
2490#define FOLL_FORCE	0x10	/* get_user_pages read/write w/o permission */
2491#define FOLL_NOWAIT	0x20	/* if a disk transfer is needed, start the IO
2492				 * and return without waiting upon it */
2493#define FOLL_POPULATE	0x40	/* fault in page */
2494#define FOLL_SPLIT	0x80	/* don't return transhuge pages, split them */
2495#define FOLL_HWPOISON	0x100	/* check page is hwpoisoned */
2496#define FOLL_NUMA	0x200	/* force NUMA hinting page fault */
2497#define FOLL_MIGRATION	0x400	/* wait for page to replace migration entry */
2498#define FOLL_TRIED	0x800	/* a retry, previous pass started an IO */
2499#define FOLL_MLOCK	0x1000	/* lock present pages */
2500#define FOLL_REMOTE	0x2000	/* we are working on non-current tsk/mm */
2501#define FOLL_COW	0x4000	/* internal GUP flag */
2502#define FOLL_ANON	0x8000	/* don't do file mappings */
2503
2504static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
2505{
2506	if (vm_fault & VM_FAULT_OOM)
2507		return -ENOMEM;
2508	if (vm_fault & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
2509		return (foll_flags & FOLL_HWPOISON) ? -EHWPOISON : -EFAULT;
2510	if (vm_fault & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
2511		return -EFAULT;
2512	return 0;
2513}
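
/*
 * Illustrative sketch: how a get_user_pages()-style caller translates a
 * fault result into an errno with the helper above:
 *
 *	ret = handle_mm_fault(vma, address, fault_flags);
 *	if (ret & VM_FAULT_ERROR)
 *		return vm_fault_to_errno(ret, foll_flags);
 */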
2514
2515typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
2516			void *data);
2517extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
2518			       unsigned long size, pte_fn_t fn, void *data);
2519
2520
2521#ifdef CONFIG_PAGE_POISONING
2522extern bool page_poisoning_enabled(void);
2523extern void kernel_poison_pages(struct page *page, int numpages, int enable);
2524extern bool page_is_poisoned(struct page *page);
2525#else
2526static inline bool page_poisoning_enabled(void) { return false; }
2527static inline void kernel_poison_pages(struct page *page, int numpages,
2528					int enable) { }
2529static inline bool page_is_poisoned(struct page *page) { return false; }
2530#endif
2531
2532#ifdef CONFIG_DEBUG_PAGEALLOC
2533extern bool _debug_pagealloc_enabled;
2534extern void __kernel_map_pages(struct page *page, int numpages, int enable);
2535
2536static inline bool debug_pagealloc_enabled(void)
2537{
2538	return _debug_pagealloc_enabled;
2539}
 
2540
2541static inline void
2542kernel_map_pages(struct page *page, int numpages, int enable)
2543{
2544	if (!debug_pagealloc_enabled())
2545		return;
2546
2547	__kernel_map_pages(page, numpages, enable);
2548}
2549#ifdef CONFIG_HIBERNATION
2550extern bool kernel_page_present(struct page *page);
2551#endif	/* CONFIG_HIBERNATION */
2552#else	/* CONFIG_DEBUG_PAGEALLOC */
2553static inline void
2554kernel_map_pages(struct page *page, int numpages, int enable) {}
2555#ifdef CONFIG_HIBERNATION
2556static inline bool kernel_page_present(struct page *page) { return true; }
2557#endif	/* CONFIG_HIBERNATION */
2558static inline bool debug_pagealloc_enabled(void)
2559{
2560	return false;
2561}
2562#endif	/* CONFIG_DEBUG_PAGEALLOC */
2563
2564#ifdef __HAVE_ARCH_GATE_AREA
2565extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
2566extern int in_gate_area_no_mm(unsigned long addr);
2567extern int in_gate_area(struct mm_struct *mm, unsigned long addr);
2568#else
2569static inline struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
2570{
2571	return NULL;
2572}
2573static inline int in_gate_area_no_mm(unsigned long addr) { return 0; }
2574static inline int in_gate_area(struct mm_struct *mm, unsigned long addr)
2575{
2576	return 0;
2577}
2578#endif	/* __HAVE_ARCH_GATE_AREA */
2579
2580extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
2581
2582#ifdef CONFIG_SYSCTL
2583extern int sysctl_drop_caches;
2584int drop_caches_sysctl_handler(struct ctl_table *, int,
2585					void __user *, size_t *, loff_t *);
2586#endif
2587
2588void drop_slab(void);
2589void drop_slab_node(int nid);
2590
2591#ifndef CONFIG_MMU
2592#define randomize_va_space 0
2593#else
2594extern int randomize_va_space;
2595#endif
2596
2597const char * arch_vma_name(struct vm_area_struct *vma);
2598void print_vma_addr(char *prefix, unsigned long rip);
2599
2600void sparse_mem_maps_populate_node(struct page **map_map,
2601				   unsigned long pnum_begin,
2602				   unsigned long pnum_end,
2603				   unsigned long map_count,
2604				   int nodeid);
2605
2606struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
2607		struct vmem_altmap *altmap);
2608pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
2609p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node);
2610pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node);
2611pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
2612pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
2613void *vmemmap_alloc_block(unsigned long size, int node);
2614struct vmem_altmap;
2615void *vmemmap_alloc_block_buf(unsigned long size, int node);
2616void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap);
2617void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
2618int vmemmap_populate_basepages(unsigned long start, unsigned long end,
2619			       int node);
2620int vmemmap_populate(unsigned long start, unsigned long end, int node,
2621		struct vmem_altmap *altmap);
2622void vmemmap_populate_print_last(void);
2623#ifdef CONFIG_MEMORY_HOTPLUG
2624void vmemmap_free(unsigned long start, unsigned long end,
2625		struct vmem_altmap *altmap);
2626#endif
2627void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
2628				  unsigned long nr_pages);
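
/*
 * Illustrative sketch, not part of this header: the simplest way an
 * architecture can implement vmemmap_populate(), by falling back to
 * base pages for the whole range.  Several architectures do roughly
 * this when huge-page vmemmap mappings are unavailable; the altmap
 * argument is ignored in this minimal form.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	return vmemmap_populate_basepages(start, end, node);
}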
2629
2630enum mf_flags {
2631	MF_COUNT_INCREASED = 1 << 0,
2632	MF_ACTION_REQUIRED = 1 << 1,
2633	MF_MUST_KILL = 1 << 2,
2634	MF_SOFT_OFFLINE = 1 << 3,
2635};
2636extern int memory_failure(unsigned long pfn, int flags);
2637extern void memory_failure_queue(unsigned long pfn, int flags);
2638extern int unpoison_memory(unsigned long pfn);
2639extern int get_hwpoison_page(struct page *page);
2640#define put_hwpoison_page(page)	put_page(page)
2641extern int sysctl_memory_failure_early_kill;
2642extern int sysctl_memory_failure_recovery;
2643extern void shake_page(struct page *p, int access);
2644extern atomic_long_t num_poisoned_pages __read_mostly;
2645extern int soft_offline_page(struct page *page, int flags);
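
/*
 * Illustrative sketch, not part of this header: how a machine-check
 * style handler might report a corrupted page.  From interrupt-like
 * context the error is queued with memory_failure_queue() and handled
 * later from process context; otherwise memory_failure() can be called
 * directly.  The helper name and the in_interrupt() split (from
 * <linux/preempt.h>) are only for illustration.
 */
static inline void example_report_bad_pfn(unsigned long pfn, bool action_required)
{
	int flags = action_required ? MF_ACTION_REQUIRED : 0;

	if (in_interrupt())
		memory_failure_queue(pfn, flags);	/* deferred handling */
	else
		memory_failure(pfn, flags);		/* handle synchronously */
}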
2646
2647
2648/*
2649 * Error handlers for various types of pages.
2650 */
2651enum mf_result {
2652	MF_IGNORED,	/* Error: cannot be handled */
2653	MF_FAILED,	/* Error: handling failed */
2654	MF_DELAYED,	/* Will be handled later */
2655	MF_RECOVERED,	/* Successfully recovered */
2656};
2657
2658enum mf_action_page_type {
2659	MF_MSG_KERNEL,
2660	MF_MSG_KERNEL_HIGH_ORDER,
2661	MF_MSG_SLAB,
2662	MF_MSG_DIFFERENT_COMPOUND,
2663	MF_MSG_POISONED_HUGE,
2664	MF_MSG_HUGE,
2665	MF_MSG_FREE_HUGE,
2666	MF_MSG_NON_PMD_HUGE,
2667	MF_MSG_UNMAP_FAILED,
2668	MF_MSG_DIRTY_SWAPCACHE,
2669	MF_MSG_CLEAN_SWAPCACHE,
2670	MF_MSG_DIRTY_MLOCKED_LRU,
2671	MF_MSG_CLEAN_MLOCKED_LRU,
2672	MF_MSG_DIRTY_UNEVICTABLE_LRU,
2673	MF_MSG_CLEAN_UNEVICTABLE_LRU,
2674	MF_MSG_DIRTY_LRU,
2675	MF_MSG_CLEAN_LRU,
2676	MF_MSG_TRUNCATED_LRU,
2677	MF_MSG_BUDDY,
2678	MF_MSG_BUDDY_2ND,
2679	MF_MSG_UNKNOWN,
2680};
2681
2682#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
2683extern void clear_huge_page(struct page *page,
2684			    unsigned long addr_hint,
2685			    unsigned int pages_per_huge_page);
2686extern void copy_user_huge_page(struct page *dst, struct page *src,
2687				unsigned long addr, struct vm_area_struct *vma,
2688				unsigned int pages_per_huge_page);
2689extern long copy_huge_page_from_user(struct page *dst_page,
2690				const void __user *usr_src,
2691				unsigned int pages_per_huge_page,
2692				bool allow_pagefault);
2693#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
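
/*
 * Illustrative sketch, not part of this header: zeroing a freshly
 * allocated huge page in a fault path.  Passing the faulting address
 * as addr_hint lets clear_huge_page() clear the subpage the task
 * touched last, so it is still cache-hot when the fault returns.
 * HPAGE_PMD_NR (the pages-per-huge-page count for PMD-sized THP) is
 * defined elsewhere and only assumed here.
 */
static inline void example_zero_new_thp(struct page *page, unsigned long fault_addr)
{
	clear_huge_page(page, fault_addr, HPAGE_PMD_NR);
}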
2694
2695extern struct page_ext_operations debug_guardpage_ops;
2696
2697#ifdef CONFIG_DEBUG_PAGEALLOC
2698extern unsigned int _debug_guardpage_minorder;
2699extern bool _debug_guardpage_enabled;
2700
2701static inline unsigned int debug_guardpage_minorder(void)
2702{
2703	return _debug_guardpage_minorder;
2704}
2705
2706static inline bool debug_guardpage_enabled(void)
2707{
2708	return _debug_guardpage_enabled;
2709}
2710
2711static inline bool page_is_guard(struct page *page)
2712{
2713	struct page_ext *page_ext;
2714
2715	if (!debug_guardpage_enabled())
2716		return false;
2717
2718	page_ext = lookup_page_ext(page);
2719	if (unlikely(!page_ext))
2720		return false;
2721
2722	return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
2723}
2724#else
2725static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2726static inline bool debug_guardpage_enabled(void) { return false; }
2727static inline bool page_is_guard(struct page *page) { return false; }
2728#endif /* CONFIG_DEBUG_PAGEALLOC */
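
/*
 * Illustrative sketch, not part of this header: a PFN walker check
 * that skips guard pages.  With CONFIG_DEBUG_PAGEALLOC and the
 * debug_guardpage_minorder= boot option, some free pages are marked as
 * guards and must not be touched; without it page_is_guard() is
 * constant false and the test optimises away.  pfn_valid() and
 * pfn_to_page() are standard helpers assumed to be available.
 */
static inline bool example_pfn_is_usable(unsigned long pfn)
{
	return pfn_valid(pfn) && !page_is_guard(pfn_to_page(pfn));
}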
2729
2730#if MAX_NUMNODES > 1
2731void __init setup_nr_node_ids(void);
2732#else
2733static inline void setup_nr_node_ids(void) {}
2734#endif
2735
2736#endif /* __KERNEL__ */
2737#endif /* _LINUX_MM_H */