include/linux/hugetlb.h (Linux v5.14.15)
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_HUGETLB_H
   3#define _LINUX_HUGETLB_H
   4
   5#include <linux/mm_types.h>
   6#include <linux/mmdebug.h>
   7#include <linux/fs.h>
   8#include <linux/hugetlb_inline.h>
   9#include <linux/cgroup.h>
  10#include <linux/list.h>
  11#include <linux/kref.h>
  12#include <linux/pgtable.h>
  13#include <linux/gfp.h>
  14#include <linux/userfaultfd_k.h>
  15
  16struct ctl_table;
  17struct user_struct;
  18struct mmu_gather;
  19
  20#ifndef is_hugepd
  21typedef struct { unsigned long pd; } hugepd_t;
  22#define is_hugepd(hugepd) (0)
  23#define __hugepd(x) ((hugepd_t) { (x) })
  24#endif
  25
  26#ifdef CONFIG_HUGETLB_PAGE
  27
  28#include <linux/mempolicy.h>
  29#include <linux/shm.h>
  30#include <asm/tlbflush.h>
  31
   32/*
   33 * For a HugeTLB page, there is more metadata to save in the struct page
   34 * than the head struct page can hold, so we reuse several tail struct
   35 * pages to store it. In order to avoid conflicts caused by subsequent
   36 * use of more tail struct pages, we gather these discrete indexes of
   37 * tail struct pages here.
   38 */
  39enum {
  40	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
  41#ifdef CONFIG_CGROUP_HUGETLB
  42	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
  43	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
  44	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
  45#endif
  46	__NR_USED_SUBPAGE,
  47};
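/*
 * Illustrative note (editor's addition, not part of the original header):
 * the indexes above are offsets from the head struct page, so the subpool
 * pointer stashed at SUBPAGE_INDEX_SUBPOOL is read back as
 *
 *	(void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 *
 * which is exactly what hugetlb_page_subpool() does later in this file.
 */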
  48
  49struct hugepage_subpool {
  50	spinlock_t lock;
  51	long count;
  52	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
  53	long used_hpages;	/* Used count against maximum, includes */
  54				/* both allocated and reserved pages. */
  55	struct hstate *hstate;
  56	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
  57	long rsv_hpages;	/* Pages reserved against global pool to */
  58				/* satisfy minimum size. */
  59};
  60
  61struct resv_map {
  62	struct kref refs;
  63	spinlock_t lock;
  64	struct list_head regions;
  65	long adds_in_progress;
  66	struct list_head region_cache;
  67	long region_cache_count;
  68#ifdef CONFIG_CGROUP_HUGETLB
  69	/*
  70	 * On private mappings, the counter to uncharge reservations is stored
  71	 * here. If these fields are 0, then either the mapping is shared, or
  72	 * cgroup accounting is disabled for this resv_map.
  73	 */
  74	struct page_counter *reservation_counter;
  75	unsigned long pages_per_hpage;
  76	struct cgroup_subsys_state *css;
  77#endif
  78};
  79
  80/*
  81 * Region tracking -- allows tracking of reservations and instantiated pages
  82 *                    across the pages in a mapping.
  83 *
  84 * The region data structures are embedded into a resv_map and protected
  85 * by a resv_map's lock.  The set of regions within the resv_map represent
  86 * reservations for huge pages, or huge pages that have already been
  87 * instantiated within the map.  The from and to elements are huge page
  88 * indices into the associated mapping.  from indicates the starting index
   89 * of the region.  to represents the first index past the end of the region.
  90 *
  91 * For example, a file region structure with from == 0 and to == 4 represents
  92 * four huge pages in a mapping.  It is important to note that the to element
  93 * represents the first element past the end of the region. This is used in
  94 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
  95 *
  96 * Interval notation of the form [from, to) will be used to indicate that
  97 * the endpoint from is inclusive and to is exclusive.
  98 */
  99struct file_region {
 100	struct list_head link;
 101	long from;
 102	long to;
 103#ifdef CONFIG_CGROUP_HUGETLB
 104	/*
 105	 * On shared mappings, each reserved region appears as a struct
 106	 * file_region in resv_map. These fields hold the info needed to
 107	 * uncharge each reservation.
 108	 */
 109	struct page_counter *reservation_counter;
 110	struct cgroup_subsys_state *css;
 111#endif
 112};
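/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * with the [from, to) convention documented above, the number of huge pages
 * covered by a region is simply to - from.  The helper below is hypothetical
 * and only meant to make the arithmetic concrete.
 */
static inline long file_region_npages(const struct file_region *rg)
{
	/* e.g. from == 0, to == 4 describes 4 huge pages */
	return rg->to - rg->from;
}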
 113
 114extern struct resv_map *resv_map_alloc(void);
 115void resv_map_release(struct kref *ref);
 116
 117extern spinlock_t hugetlb_lock;
 118extern int hugetlb_max_hstate __read_mostly;
 119#define for_each_hstate(h) \
 120	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
 121
 122struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
 123						long min_hpages);
 124void hugepage_put_subpool(struct hugepage_subpool *spool);
 125
 126void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 127int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
 128int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
 129		loff_t *);
 130int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
 131		loff_t *);
 132int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
 133		loff_t *);
 134
 135int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 136long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 137			 struct page **, struct vm_area_struct **,
 138			 unsigned long *, unsigned long *, long, unsigned int,
 139			 int *);
 140void unmap_hugepage_range(struct vm_area_struct *,
 141			  unsigned long, unsigned long, struct page *);
 142void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 143			  struct vm_area_struct *vma,
 144			  unsigned long start, unsigned long end,
 145			  struct page *ref_page);
 146void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 147				unsigned long start, unsigned long end,
 148				struct page *ref_page);
 149void hugetlb_report_meminfo(struct seq_file *);
 150int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 151void hugetlb_show_meminfo(void);
 152unsigned long hugetlb_total_pages(void);
 153vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 154			unsigned long address, unsigned int flags);
 155#ifdef CONFIG_USERFAULTFD
 156int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
 157				struct vm_area_struct *dst_vma,
 158				unsigned long dst_addr,
 159				unsigned long src_addr,
 160				enum mcopy_atomic_mode mode,
 161				struct page **pagep);
 162#endif /* CONFIG_USERFAULTFD */
 163bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 164						struct vm_area_struct *vma,
 165						vm_flags_t vm_flags);
 166long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 167						long freed);
 168bool isolate_huge_page(struct page *page, struct list_head *list);
 169int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
 170void putback_active_hugepage(struct page *page);
 171void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 172void free_huge_page(struct page *page);
 173void hugetlb_fix_reserve_counts(struct inode *inode);
 174extern struct mutex *hugetlb_fault_mutex_table;
 175u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 176
 177pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 178		      unsigned long addr, pud_t *pud);
 179
 180struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 181
 182extern int sysctl_hugetlb_shm_group;
 183extern struct list_head huge_boot_pages;
 184
 185/* arch callbacks */
 186
 187pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 188			unsigned long addr, unsigned long sz);
 189pte_t *huge_pte_offset(struct mm_struct *mm,
 190		       unsigned long addr, unsigned long sz);
 191int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 192				unsigned long *addr, pte_t *ptep);
 193void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 194				unsigned long *start, unsigned long *end);
 195struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 196			      int write);
 197struct page *follow_huge_pd(struct vm_area_struct *vma,
 198			    unsigned long address, hugepd_t hpd,
 199			    int flags, int pdshift);
 200struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 201				pmd_t *pmd, int flags);
 202struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 203				pud_t *pud, int flags);
 204struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
 205			     pgd_t *pgd, int flags);
 206
 207int pmd_huge(pmd_t pmd);
 208int pud_huge(pud_t pud);
 209unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 210		unsigned long address, unsigned long end, pgprot_t newprot);
 211
 212bool is_hugetlb_entry_migration(pte_t pte);
 213void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
 214
 215#else /* !CONFIG_HUGETLB_PAGE */
 216
 217static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 218{
 219}
 220
 221static inline unsigned long hugetlb_total_pages(void)
 222{
 223	return 0;
 224}
 225
 226static inline struct address_space *hugetlb_page_mapping_lock_write(
 227							struct page *hpage)
 228{
 229	return NULL;
 230}
 231
 232static inline int huge_pmd_unshare(struct mm_struct *mm,
 233					struct vm_area_struct *vma,
 234					unsigned long *addr, pte_t *ptep)
 235{
 236	return 0;
 237}
 238
 239static inline void adjust_range_if_pmd_sharing_possible(
 240				struct vm_area_struct *vma,
 241				unsigned long *start, unsigned long *end)
 242{
 243}
 244
 245static inline long follow_hugetlb_page(struct mm_struct *mm,
 246			struct vm_area_struct *vma, struct page **pages,
 247			struct vm_area_struct **vmas, unsigned long *position,
 248			unsigned long *nr_pages, long i, unsigned int flags,
 249			int *nonblocking)
 250{
 251	BUG();
 252	return 0;
 253}
 254
 255static inline struct page *follow_huge_addr(struct mm_struct *mm,
 256					unsigned long address, int write)
 257{
 258	return ERR_PTR(-EINVAL);
 259}
 260
 261static inline int copy_hugetlb_page_range(struct mm_struct *dst,
 262			struct mm_struct *src, struct vm_area_struct *vma)
 263{
 264	BUG();
 265	return 0;
 266}
 267
 268static inline void hugetlb_report_meminfo(struct seq_file *m)
 269{
 270}
 271
 272static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
 273{
 274	return 0;
 275}
 276
 277static inline void hugetlb_show_meminfo(void)
 278{
 279}
 280
 281static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
 282				unsigned long address, hugepd_t hpd, int flags,
 283				int pdshift)
 284{
 285	return NULL;
 286}
 287
 288static inline struct page *follow_huge_pmd(struct mm_struct *mm,
 289				unsigned long address, pmd_t *pmd, int flags)
 290{
 291	return NULL;
 292}
 293
 294static inline struct page *follow_huge_pud(struct mm_struct *mm,
 295				unsigned long address, pud_t *pud, int flags)
 296{
 297	return NULL;
 298}
 299
 300static inline struct page *follow_huge_pgd(struct mm_struct *mm,
 301				unsigned long address, pgd_t *pgd, int flags)
 302{
 303	return NULL;
 304}
 305
 306static inline int prepare_hugepage_range(struct file *file,
 307				unsigned long addr, unsigned long len)
 308{
 309	return -EINVAL;
 310}
 311
 312static inline int pmd_huge(pmd_t pmd)
 313{
 314	return 0;
 315}
 316
 317static inline int pud_huge(pud_t pud)
 318{
 319	return 0;
 320}
 321
 322static inline int is_hugepage_only_range(struct mm_struct *mm,
 323					unsigned long addr, unsigned long len)
 324{
 325	return 0;
 326}
 327
 328static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 329				unsigned long addr, unsigned long end,
 330				unsigned long floor, unsigned long ceiling)
 331{
 332	BUG();
 333}
 334
 335#ifdef CONFIG_USERFAULTFD
 336static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 337						pte_t *dst_pte,
 338						struct vm_area_struct *dst_vma,
 339						unsigned long dst_addr,
 340						unsigned long src_addr,
 341						enum mcopy_atomic_mode mode,
 342						struct page **pagep)
 343{
 344	BUG();
 345	return 0;
 346}
 347#endif /* CONFIG_USERFAULTFD */
 348
 349static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 350					unsigned long sz)
 351{
 352	return NULL;
 353}
 354
 355static inline bool isolate_huge_page(struct page *page, struct list_head *list)
 356{
 357	return false;
 358}
 359
 360static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
 361{
 362	return 0;
 363}
 364
 365static inline void putback_active_hugepage(struct page *page)
 366{
 367}
 368
 369static inline void move_hugetlb_state(struct page *oldpage,
 370					struct page *newpage, int reason)
 371{
 372}
 373
 374static inline unsigned long hugetlb_change_protection(
 375			struct vm_area_struct *vma, unsigned long address,
 376			unsigned long end, pgprot_t newprot)
 377{
 378	return 0;
 379}
 380
 381static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 382			struct vm_area_struct *vma, unsigned long start,
 383			unsigned long end, struct page *ref_page)
 384{
 385	BUG();
 386}
 387
 388static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 389			struct vm_area_struct *vma, unsigned long start,
 390			unsigned long end, struct page *ref_page)
 391{
 392	BUG();
 393}
 394
 395static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 396			struct vm_area_struct *vma, unsigned long address,
 397			unsigned int flags)
 398{
 399	BUG();
 400	return 0;
 401}
 402
 403static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 404
 405#endif /* !CONFIG_HUGETLB_PAGE */
  406/*
  407 * hugepages at the page global directory. If an arch supports
  408 * hugepages at the pgd level, it needs to define this.
  409 */
 410#ifndef pgd_huge
 411#define pgd_huge(x)	0
 412#endif
 413#ifndef p4d_huge
 414#define p4d_huge(x)	0
 415#endif
 416
 417#ifndef pgd_write
 418static inline int pgd_write(pgd_t pgd)
 419{
 420	BUG();
 421	return 0;
 422}
 423#endif
 424
 425#define HUGETLB_ANON_FILE "anon_hugepage"
 426
 427enum {
 428	/*
  429	 * The file will be used as a shm file, so shmfs accounting rules
  430	 * apply.
 431	 */
 432	HUGETLB_SHMFS_INODE     = 1,
 433	/*
 434	 * The file is being created on the internal vfs mount and shmfs
 435	 * accounting rules do not apply
 436	 */
 437	HUGETLB_ANONHUGE_INODE  = 2,
 438};
 439
 440#ifdef CONFIG_HUGETLBFS
 441struct hugetlbfs_sb_info {
 442	long	max_inodes;   /* inodes allowed */
 443	long	free_inodes;  /* inodes free */
 444	spinlock_t	stat_lock;
 445	struct hstate *hstate;
 446	struct hugepage_subpool *spool;
 447	kuid_t	uid;
 448	kgid_t	gid;
 449	umode_t mode;
 450};
 451
 452static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 453{
 454	return sb->s_fs_info;
 455}
 456
 457struct hugetlbfs_inode_info {
 458	struct shared_policy policy;
 459	struct inode vfs_inode;
 460	unsigned int seals;
 461};
 462
 463static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
 464{
 465	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
 466}
 467
 468extern const struct file_operations hugetlbfs_file_operations;
 469extern const struct vm_operations_struct hugetlb_vm_ops;
 470struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
 471				struct ucounts **ucounts, int creat_flags,
 472				int page_size_log);
 473
 474static inline bool is_file_hugepages(struct file *file)
 475{
 476	if (file->f_op == &hugetlbfs_file_operations)
 477		return true;
 478
 479	return is_file_shm_hugepages(file);
 480}
 481
 482static inline struct hstate *hstate_inode(struct inode *i)
 483{
 484	return HUGETLBFS_SB(i->i_sb)->hstate;
 485}
 486#else /* !CONFIG_HUGETLBFS */
 487
 488#define is_file_hugepages(file)			false
 489static inline struct file *
 490hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
 491		struct ucounts **ucounts, int creat_flags,
 492		int page_size_log)
 493{
 494	return ERR_PTR(-ENOSYS);
 495}
 496
 497static inline struct hstate *hstate_inode(struct inode *i)
 498{
 499	return NULL;
 500}
 501#endif /* !CONFIG_HUGETLBFS */
 502
 503#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 504unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 505					unsigned long len, unsigned long pgoff,
 506					unsigned long flags);
 507#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
 508
 509/*
  510 * hugetlb page specific state flags.  These flags are located in page.private
 511 * of the hugetlb head page.  Functions created via the below macros should be
 512 * used to manipulate these flags.
 513 *
 514 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 515 *	allocation time.  Cleared when page is fully instantiated.  Free
 516 *	routine checks flag to restore a reservation on error paths.
 517 *	Synchronization:  Examined or modified by code that knows it has
 518 *	the only reference to page.  i.e. After allocation but before use
 519 *	or when the page is being freed.
 520 * HPG_migratable  - Set after a newly allocated page is added to the page
 521 *	cache and/or page tables.  Indicates the page is a candidate for
 522 *	migration.
 523 *	Synchronization:  Initially set after new page allocation with no
 524 *	locking.  When examined and modified during migration processing
 525 *	(isolate, migrate, putback) the hugetlb_lock is held.
  526 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 527 *	allocator.  Typically used for migration target pages when no pages
 528 *	are available in the pool.  The hugetlb free page path will
 529 *	immediately free pages with this flag set to the buddy allocator.
 530 *	Synchronization: Can be set after huge page allocation from buddy when
  531 *	code knows it has the only reference.  All other examinations and
 532 *	modifications require hugetlb_lock.
 533 * HPG_freed - Set when page is on the free lists.
 534 *	Synchronization: hugetlb_lock held for examination and modification.
 535 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 536 */
 537enum hugetlb_page_flags {
 538	HPG_restore_reserve = 0,
 539	HPG_migratable,
 540	HPG_temporary,
 541	HPG_freed,
 542	HPG_vmemmap_optimized,
 543	__NR_HPAGEFLAGS,
 544};
 545
 546/*
 547 * Macros to create test, set and clear function definitions for
 548 * hugetlb specific page flags.
 549 */
 550#ifdef CONFIG_HUGETLB_PAGE
 551#define TESTHPAGEFLAG(uname, flname)				\
 552static inline int HPage##uname(struct page *page)		\
 553	{ return test_bit(HPG_##flname, &(page->private)); }
 554
 555#define SETHPAGEFLAG(uname, flname)				\
 556static inline void SetHPage##uname(struct page *page)		\
 557	{ set_bit(HPG_##flname, &(page->private)); }
 558
 559#define CLEARHPAGEFLAG(uname, flname)				\
 560static inline void ClearHPage##uname(struct page *page)		\
 561	{ clear_bit(HPG_##flname, &(page->private)); }
 562#else
 563#define TESTHPAGEFLAG(uname, flname)				\
 564static inline int HPage##uname(struct page *page)		\
 565	{ return 0; }
 566
 567#define SETHPAGEFLAG(uname, flname)				\
 568static inline void SetHPage##uname(struct page *page)		\
 569	{ }
 570
 571#define CLEARHPAGEFLAG(uname, flname)				\
 572static inline void ClearHPage##uname(struct page *page)		\
 573	{ }
 574#endif
 575
 576#define HPAGEFLAG(uname, flname)				\
 577	TESTHPAGEFLAG(uname, flname)				\
 578	SETHPAGEFLAG(uname, flname)				\
 579	CLEARHPAGEFLAG(uname, flname)				\
 580
 581/*
 582 * Create functions associated with hugetlb page flags
 583 */
 584HPAGEFLAG(RestoreReserve, restore_reserve)
 585HPAGEFLAG(Migratable, migratable)
 586HPAGEFLAG(Temporary, temporary)
 587HPAGEFLAG(Freed, freed)
 588HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
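/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * each HPAGEFLAG(uname, flname) line above generates three helpers.  For
 * example, HPAGEFLAG(Temporary, temporary) yields HPageTemporary(),
 * SetHPageTemporary() and ClearHPageTemporary(), all operating on bit
 * HPG_temporary in the head page's page->private.  The function below is
 * hypothetical and shows typical usage of the generated helpers.
 */
static inline void example_mark_temporary(struct page *hpage)
{
	if (!HPageTemporary(hpage))	/* test the flag */
		SetHPageTemporary(hpage);	/* set the flag */
}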
 589
 590#ifdef CONFIG_HUGETLB_PAGE
 591
 592#define HSTATE_NAME_LEN 32
 593/* Defines one hugetlb page size */
 594struct hstate {
 595	struct mutex resize_lock;
 596	int next_nid_to_alloc;
 597	int next_nid_to_free;
 598	unsigned int order;
 599	unsigned long mask;
 600	unsigned long max_huge_pages;
 601	unsigned long nr_huge_pages;
 602	unsigned long free_huge_pages;
 603	unsigned long resv_huge_pages;
 604	unsigned long surplus_huge_pages;
 605	unsigned long nr_overcommit_huge_pages;
 606	struct list_head hugepage_activelist;
 607	struct list_head hugepage_freelists[MAX_NUMNODES];
 608	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 609	unsigned int free_huge_pages_node[MAX_NUMNODES];
 610	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 611#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 612	unsigned int nr_free_vmemmap_pages;
 613#endif
 614#ifdef CONFIG_CGROUP_HUGETLB
 615	/* cgroup control files */
 616	struct cftype cgroup_files_dfl[7];
 617	struct cftype cgroup_files_legacy[9];
 618#endif
 619	char name[HSTATE_NAME_LEN];
 620};
 621
 622struct huge_bootmem_page {
 623	struct list_head list;
 624	struct hstate *hstate;
 625};
 626
 627int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 628struct page *alloc_huge_page(struct vm_area_struct *vma,
 629				unsigned long addr, int avoid_reserve);
 630struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 631				nodemask_t *nmask, gfp_t gfp_mask);
 632struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 633				unsigned long address);
 634int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 635			pgoff_t idx);
 636void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 637				unsigned long address, struct page *page);
 638
 639/* arch callback */
 640int __init __alloc_bootmem_huge_page(struct hstate *h);
 641int __init alloc_bootmem_huge_page(struct hstate *h);
 642
 643void __init hugetlb_add_hstate(unsigned order);
 644bool __init arch_hugetlb_valid_size(unsigned long size);
 645struct hstate *size_to_hstate(unsigned long size);
 646
 647#ifndef HUGE_MAX_HSTATE
 648#define HUGE_MAX_HSTATE 1
 649#endif
 650
 651extern struct hstate hstates[HUGE_MAX_HSTATE];
 652extern unsigned int default_hstate_idx;
 653
 654#define default_hstate (hstates[default_hstate_idx])
 655
 656/*
 657 * hugetlb page subpool pointer located in hpage[1].private
 658 */
 659static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
 660{
 661	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 662}
 663
 664static inline void hugetlb_set_page_subpool(struct page *hpage,
 665					struct hugepage_subpool *subpool)
 666{
 667	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
 668}
 669
 670static inline struct hstate *hstate_file(struct file *f)
 671{
 672	return hstate_inode(file_inode(f));
 673}
 674
 675static inline struct hstate *hstate_sizelog(int page_size_log)
 676{
 677	if (!page_size_log)
 678		return &default_hstate;
 679
 680	return size_to_hstate(1UL << page_size_log);
 681}
 682
 683static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
 684{
 685	return hstate_file(vma->vm_file);
 686}
 687
 688static inline unsigned long huge_page_size(struct hstate *h)
 689{
 690	return (unsigned long)PAGE_SIZE << h->order;
 691}
 692
 693extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
 694
 695extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
 696
 697static inline unsigned long huge_page_mask(struct hstate *h)
 698{
 699	return h->mask;
 700}
 701
 702static inline unsigned int huge_page_order(struct hstate *h)
 703{
 704	return h->order;
 705}
 706
 707static inline unsigned huge_page_shift(struct hstate *h)
 708{
 709	return h->order + PAGE_SHIFT;
 710}
 711
 712static inline bool hstate_is_gigantic(struct hstate *h)
 713{
 714	return huge_page_order(h) >= MAX_ORDER;
 715}
 716
 717static inline unsigned int pages_per_huge_page(struct hstate *h)
 718{
 719	return 1 << h->order;
 720}
 721
 722static inline unsigned int blocks_per_huge_page(struct hstate *h)
 723{
 724	return huge_page_size(h) / 512;
 725}
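/*
 * Worked example (editor's addition, assuming x86-64 with 4 KiB base pages):
 * a 2 MiB hugetlb page has h->order == 9, so
 *
 *	huge_page_size(h)       == 4096 << 9   == 2 MiB
 *	huge_page_shift(h)      == 9 + 12      == 21
 *	pages_per_huge_page(h)  == 1 << 9      == 512
 *	blocks_per_huge_page(h) == 2 MiB / 512 == 4096 (512-byte sectors)
 */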
 726
 727#include <asm/hugetlb.h>
 728
 729#ifndef is_hugepage_only_range
 730static inline int is_hugepage_only_range(struct mm_struct *mm,
 731					unsigned long addr, unsigned long len)
 732{
 733	return 0;
 734}
 735#define is_hugepage_only_range is_hugepage_only_range
 736#endif
 737
 738#ifndef arch_clear_hugepage_flags
 739static inline void arch_clear_hugepage_flags(struct page *page) { }
 740#define arch_clear_hugepage_flags arch_clear_hugepage_flags
 741#endif
 742
 743#ifndef arch_make_huge_pte
 744static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
 745				       vm_flags_t flags)
 746{
 747	return entry;
 748}
 749#endif
 750
 751static inline struct hstate *page_hstate(struct page *page)
 752{
 753	VM_BUG_ON_PAGE(!PageHuge(page), page);
 754	return size_to_hstate(page_size(page));
 755}
 756
 757static inline unsigned hstate_index_to_shift(unsigned index)
 758{
 759	return hstates[index].order + PAGE_SHIFT;
 760}
 761
 762static inline int hstate_index(struct hstate *h)
 763{
 764	return h - hstates;
 765}
 766
 767extern int dissolve_free_huge_page(struct page *page);
 768extern int dissolve_free_huge_pages(unsigned long start_pfn,
 769				    unsigned long end_pfn);
 770
 771#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 772#ifndef arch_hugetlb_migration_supported
 773static inline bool arch_hugetlb_migration_supported(struct hstate *h)
 774{
 775	if ((huge_page_shift(h) == PMD_SHIFT) ||
 776		(huge_page_shift(h) == PUD_SHIFT) ||
 777			(huge_page_shift(h) == PGDIR_SHIFT))
 778		return true;
 779	else
 780		return false;
 781}
 782#endif
 783#else
 784static inline bool arch_hugetlb_migration_supported(struct hstate *h)
 785{
 786	return false;
 787}
 788#endif
 789
 790static inline bool hugepage_migration_supported(struct hstate *h)
 791{
 792	return arch_hugetlb_migration_supported(h);
 793}
 794
  795/*
  796 * The movability check is different from the migration check.
  797 * It determines whether or not a huge page should be placed in a
  798 * movable zone. Movability of a huge page only needs to be
  799 * considered if the huge page size is supported for migration;
  800 * there is no reason for a huge page to be movable if it is not
  801 * migratable to start with. The huge page must also be small
  802 * enough that migrating it out of a movable zone remains
  803 * feasible; merely residing in a movable zone does not make
  804 * migration feasible.
  805 *
  806 * So even though large huge page sizes like the gigantic ones
  807 * are migratable, they should not be movable, because it is not
  808 * feasible to migrate them out of a movable zone.
  809 */
 810static inline bool hugepage_movable_supported(struct hstate *h)
 811{
 812	if (!hugepage_migration_supported(h))
 813		return false;
 814
 815	if (hstate_is_gigantic(h))
 816		return false;
 817	return true;
 818}
 819
 820/* Movability of hugepages depends on migration support. */
 821static inline gfp_t htlb_alloc_mask(struct hstate *h)
 822{
 823	if (hugepage_movable_supported(h))
 824		return GFP_HIGHUSER_MOVABLE;
 825	else
 826		return GFP_HIGHUSER;
 827}
 828
 829static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 830{
 831	gfp_t modified_mask = htlb_alloc_mask(h);
 832
 833	/* Some callers might want to enforce node */
 834	modified_mask |= (gfp_mask & __GFP_THISNODE);
 835
 836	modified_mask |= (gfp_mask & __GFP_NOWARN);
 837
 838	return modified_mask;
 839}
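/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * a caller that wants a huge page on a particular node might combine the
 * helpers above roughly like this.  example_alloc_on_node() is hypothetical;
 * the caller's gfp mask contributes only its __GFP_THISNODE/__GFP_NOWARN
 * bits via htlb_modify_alloc_mask().
 */
static inline struct page *example_alloc_on_node(struct hstate *h, int nid,
						 nodemask_t *nmask, gfp_t gfp)
{
	return alloc_huge_page_nodemask(h, nid, nmask,
					htlb_modify_alloc_mask(h, gfp));
}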
 840
 841static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 842					   struct mm_struct *mm, pte_t *pte)
 843{
 844	if (huge_page_size(h) == PMD_SIZE)
 845		return pmd_lockptr(mm, (pmd_t *) pte);
 846	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
 847	return &mm->page_table_lock;
 848}
 849
 850#ifndef hugepages_supported
 851/*
  852 * Some platforms decide whether they support huge pages at boot
  853 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
  854 * when there is no such support.
 855 */
 856#define hugepages_supported() (HPAGE_SHIFT != 0)
 857#endif
 858
 859void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
 860
 861static inline void hugetlb_count_init(struct mm_struct *mm)
 862{
 863	atomic_long_set(&mm->hugetlb_usage, 0);
 864}
 865
 866static inline void hugetlb_count_add(long l, struct mm_struct *mm)
 867{
 868	atomic_long_add(l, &mm->hugetlb_usage);
 869}
 870
 871static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
 872{
 873	atomic_long_sub(l, &mm->hugetlb_usage);
 874}
 875
 876#ifndef set_huge_swap_pte_at
 877static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 878					pte_t *ptep, pte_t pte, unsigned long sz)
 879{
 880	set_huge_pte_at(mm, addr, ptep, pte);
 881}
 882#endif
 883
 884#ifndef huge_ptep_modify_prot_start
 885#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
 886static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
 887						unsigned long addr, pte_t *ptep)
 888{
 889	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
 890}
 891#endif
 892
 893#ifndef huge_ptep_modify_prot_commit
 894#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
 895static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
 896						unsigned long addr, pte_t *ptep,
 897						pte_t old_pte, pte_t pte)
 898{
 899	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
 900}
 901#endif
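/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the two helpers above are meant to be paired when changing the protection
 * of a single huge PTE.  example_change_prot() is hypothetical and assumes
 * the architecture provides huge_pte_modify() via <asm/hugetlb.h>.
 */
static inline void example_change_prot(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				pgprot_t newprot)
{
	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
	pte_t pte = huge_pte_modify(old_pte, newprot);

	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
}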
 902
 903#else	/* CONFIG_HUGETLB_PAGE */
 904struct hstate {};
 905
 906static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
 907{
 908	return NULL;
 909}
 910
 911static inline int isolate_or_dissolve_huge_page(struct page *page,
 912						struct list_head *list)
 913{
 914	return -ENOMEM;
 915}
 916
 917static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
 918					   unsigned long addr,
 919					   int avoid_reserve)
 920{
 921	return NULL;
 922}
 923
 924static inline struct page *
 925alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 926			nodemask_t *nmask, gfp_t gfp_mask)
 927{
 928	return NULL;
 929}
 930
 931static inline struct page *alloc_huge_page_vma(struct hstate *h,
 932					       struct vm_area_struct *vma,
 933					       unsigned long address)
 934{
 935	return NULL;
 936}
 937
 938static inline int __alloc_bootmem_huge_page(struct hstate *h)
 939{
 940	return 0;
 941}
 942
 943static inline struct hstate *hstate_file(struct file *f)
 944{
 945	return NULL;
 946}
 947
 948static inline struct hstate *hstate_sizelog(int page_size_log)
 949{
 950	return NULL;
 951}
 952
 953static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
 954{
 955	return NULL;
 956}
 957
 958static inline struct hstate *page_hstate(struct page *page)
 959{
 960	return NULL;
 961}
 962
 963static inline unsigned long huge_page_size(struct hstate *h)
 964{
 965	return PAGE_SIZE;
 966}
 967
 968static inline unsigned long huge_page_mask(struct hstate *h)
 969{
 970	return PAGE_MASK;
 971}
 972
 973static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 974{
 975	return PAGE_SIZE;
 976}
 977
 978static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 979{
 980	return PAGE_SIZE;
 981}
 982
 983static inline unsigned int huge_page_order(struct hstate *h)
 984{
 985	return 0;
 986}
 987
 988static inline unsigned int huge_page_shift(struct hstate *h)
 989{
 990	return PAGE_SHIFT;
 991}
 992
 993static inline bool hstate_is_gigantic(struct hstate *h)
 994{
 995	return false;
 996}
 997
 998static inline unsigned int pages_per_huge_page(struct hstate *h)
 999{
1000	return 1;
1001}
1002
1003static inline unsigned hstate_index_to_shift(unsigned index)
1004{
1005	return 0;
1006}
1007
1008static inline int hstate_index(struct hstate *h)
1009{
1010	return 0;
1011}
1012
1013static inline int dissolve_free_huge_page(struct page *page)
1014{
1015	return 0;
1016}
1017
1018static inline int dissolve_free_huge_pages(unsigned long start_pfn,
1019					   unsigned long end_pfn)
1020{
1021	return 0;
1022}
1023
1024static inline bool hugepage_migration_supported(struct hstate *h)
1025{
1026	return false;
1027}
1028
1029static inline bool hugepage_movable_supported(struct hstate *h)
1030{
1031	return false;
1032}
1033
1034static inline gfp_t htlb_alloc_mask(struct hstate *h)
1035{
1036	return 0;
1037}
1038
1039static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
1040{
1041	return 0;
1042}
1043
1044static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
1045					   struct mm_struct *mm, pte_t *pte)
1046{
1047	return &mm->page_table_lock;
1048}
1049
1050static inline void hugetlb_count_init(struct mm_struct *mm)
1051{
1052}
1053
1054static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
1055{
1056}
1057
1058static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
1059{
1060}
1061
1062static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
1063					pte_t *ptep, pte_t pte, unsigned long sz)
1064{
1065}
1066#endif	/* CONFIG_HUGETLB_PAGE */
1067
1068#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
1069extern bool hugetlb_free_vmemmap_enabled;
1070#else
1071#define hugetlb_free_vmemmap_enabled	false
1072#endif
1073
1074static inline spinlock_t *huge_pte_lock(struct hstate *h,
1075					struct mm_struct *mm, pte_t *pte)
1076{
1077	spinlock_t *ptl;
1078
1079	ptl = huge_pte_lockptr(h, mm, pte);
1080	spin_lock(ptl);
1081	return ptl;
1082}
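/*
 * Illustrative sketch (editor's addition, not part of the original header):
 * the usual pattern for touching a hugetlb PTE under its page table lock.
 * example_with_huge_pte_locked() is hypothetical.
 */
static inline void example_with_huge_pte_locked(struct hstate *h,
					struct mm_struct *mm, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);	/* lockptr + spin_lock */

	/* ... examine or modify *ptep while the lock is held ... */

	spin_unlock(ptl);
}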
1083
1084#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
1085extern void __init hugetlb_cma_reserve(int order);
1086extern void __init hugetlb_cma_check(void);
1087#else
1088static inline __init void hugetlb_cma_reserve(int order)
1089{
1090}
1091static inline __init void hugetlb_cma_check(void)
1092{
1093}
1094#endif
1095
1096bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
1097
1098#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
1099/*
1100 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
1101 * implement this.
1102 */
1103#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1104#endif
1105
1106#endif /* _LINUX_HUGETLB_H */
include/linux/hugetlb.h (Linux v4.17)
  1/* SPDX-License-Identifier: GPL-2.0 */
  2#ifndef _LINUX_HUGETLB_H
  3#define _LINUX_HUGETLB_H
  4
  5#include <linux/mm_types.h>
  6#include <linux/mmdebug.h>
  7#include <linux/fs.h>
  8#include <linux/hugetlb_inline.h>
  9#include <linux/cgroup.h>
 10#include <linux/list.h>
 11#include <linux/kref.h>
 12#include <asm/pgtable.h>
 13
 14struct ctl_table;
 15struct user_struct;
 16struct mmu_gather;
 17
 18#ifndef is_hugepd
 19/*
  20 * Some architectures require a hugepage directory format in order
  21 * to support multiple hugepage sizes. For example, commit
  22 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
  23 * introduced this on powerpc, allowing a more flexible hugepage
  24 * pagetable layout.
 25 */
 26typedef struct { unsigned long pd; } hugepd_t;
 27#define is_hugepd(hugepd) (0)
 28#define __hugepd(x) ((hugepd_t) { (x) })
 29static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
 30			      unsigned pdshift, unsigned long end,
 31			      int write, struct page **pages, int *nr)
 32{
 33	return 0;
 34}
 35#else
 36extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
 37		       unsigned pdshift, unsigned long end,
 38		       int write, struct page **pages, int *nr);
 39#endif
 40
 41
 42#ifdef CONFIG_HUGETLB_PAGE
 43
 44#include <linux/mempolicy.h>
 45#include <linux/shm.h>
 46#include <asm/tlbflush.h>
 47
 48struct hugepage_subpool {
 49	spinlock_t lock;
 50	long count;
 51	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
 52	long used_hpages;	/* Used count against maximum, includes */
  53				/* both allocated and reserved pages. */
 54	struct hstate *hstate;
 55	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
 56	long rsv_hpages;	/* Pages reserved against global pool to */
  57				/* satisfy minimum size. */
 58};
 59
 60struct resv_map {
 61	struct kref refs;
 62	spinlock_t lock;
 63	struct list_head regions;
 64	long adds_in_progress;
 65	struct list_head region_cache;
 66	long region_cache_count;
 67};
 68extern struct resv_map *resv_map_alloc(void);
 69void resv_map_release(struct kref *ref);
 70
 71extern spinlock_t hugetlb_lock;
 72extern int hugetlb_max_hstate __read_mostly;
 73#define for_each_hstate(h) \
 74	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
 75
 76struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
 77						long min_hpages);
 78void hugepage_put_subpool(struct hugepage_subpool *spool);
 79
 80void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 81int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
 82int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
 83int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
 84
 85#ifdef CONFIG_NUMA
 86int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
 87					void __user *, size_t *, loff_t *);
 88#endif
 89
 90int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 91long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 92			 struct page **, struct vm_area_struct **,
 93			 unsigned long *, unsigned long *, long, unsigned int,
 94			 int *);
 95void unmap_hugepage_range(struct vm_area_struct *,
 96			  unsigned long, unsigned long, struct page *);
 97void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 98			  struct vm_area_struct *vma,
 99			  unsigned long start, unsigned long end,
100			  struct page *ref_page);
101void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
102				unsigned long start, unsigned long end,
103				struct page *ref_page);
104void hugetlb_report_meminfo(struct seq_file *);
105int hugetlb_report_node_meminfo(int, char *);
106void hugetlb_show_meminfo(void);
107unsigned long hugetlb_total_pages(void);
108int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
109			unsigned long address, unsigned int flags);
110int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
111				struct vm_area_struct *dst_vma,
112				unsigned long dst_addr,
113				unsigned long src_addr,
114				struct page **pagep);
115int hugetlb_reserve_pages(struct inode *inode, long from, long to,
116						struct vm_area_struct *vma,
117						vm_flags_t vm_flags);
118long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
119						long freed);
120bool isolate_huge_page(struct page *page, struct list_head *list);
121void putback_active_hugepage(struct page *page);
122void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
123void free_huge_page(struct page *page);
124void hugetlb_fix_reserve_counts(struct inode *inode);
125extern struct mutex *hugetlb_fault_mutex_table;
126u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm,
127				struct vm_area_struct *vma,
128				struct address_space *mapping,
129				pgoff_t idx, unsigned long address);
130
131pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
132
133extern int sysctl_hugetlb_shm_group;
134extern struct list_head huge_boot_pages;
135
136/* arch callbacks */
137
138pte_t *huge_pte_alloc(struct mm_struct *mm,
139			unsigned long addr, unsigned long sz);
140pte_t *huge_pte_offset(struct mm_struct *mm,
141		       unsigned long addr, unsigned long sz);
142int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep);
143struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
144			      int write);
145struct page *follow_huge_pd(struct vm_area_struct *vma,
146			    unsigned long address, hugepd_t hpd,
147			    int flags, int pdshift);
148struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
149				pmd_t *pmd, int flags);
150struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
151				pud_t *pud, int flags);
152struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
153			     pgd_t *pgd, int flags);
154
155int pmd_huge(pmd_t pmd);
156int pud_huge(pud_t pud);
157unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
158		unsigned long address, unsigned long end, pgprot_t newprot);
159
160bool is_hugetlb_entry_migration(pte_t pte);
161
162#else /* !CONFIG_HUGETLB_PAGE */
163
164static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
165{
166}
167
168static inline unsigned long hugetlb_total_pages(void)
169{
170	return 0;
171}
172
173#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
174#define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
175#define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
176static inline void hugetlb_report_meminfo(struct seq_file *m)
177{
178}
179#define hugetlb_report_node_meminfo(n, buf)	0
180static inline void hugetlb_show_meminfo(void)
181{
182}
183#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
184#define follow_huge_pmd(mm, addr, pmd, flags)	NULL
185#define follow_huge_pud(mm, addr, pud, flags)	NULL
186#define follow_huge_pgd(mm, addr, pgd, flags)	NULL
187#define prepare_hugepage_range(file, addr, len)	(-EINVAL)
188#define pmd_huge(x)	0
189#define pud_huge(x)	0
190#define is_hugepage_only_range(mm, addr, len)	0
191#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
192#define hugetlb_fault(mm, vma, addr, flags)	({ BUG(); 0; })
193#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
194				src_addr, pagep)	({ BUG(); 0; })
195#define huge_pte_offset(mm, address, sz)	0
196
197static inline bool isolate_huge_page(struct page *page, struct list_head *list)
198{
199	return false;
200}
201#define putback_active_hugepage(p)	do {} while (0)
202#define move_hugetlb_state(old, new, reason)	do {} while (0)
203
204static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
205		unsigned long address, unsigned long end, pgprot_t newprot)
206{
207	return 0;
208}
209
210static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
211			struct vm_area_struct *vma, unsigned long start,
212			unsigned long end, struct page *ref_page)
213{
214	BUG();
215}
216
217static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
218			struct vm_area_struct *vma, unsigned long start,
219			unsigned long end, struct page *ref_page)
220{
221	BUG();
222}
223
224#endif /* !CONFIG_HUGETLB_PAGE */
225/*
 226 * hugepages at the page global directory. If an arch supports
 227 * hugepages at the pgd level, it needs to define this.
228 */
229#ifndef pgd_huge
230#define pgd_huge(x)	0
231#endif
232#ifndef p4d_huge
233#define p4d_huge(x)	0
234#endif
235
236#ifndef pgd_write
237static inline int pgd_write(pgd_t pgd)
238{
239	BUG();
240	return 0;
241}
242#endif
243
244#define HUGETLB_ANON_FILE "anon_hugepage"
245
246enum {
247	/*
 248	 * The file will be used as a shm file, so shmfs accounting rules
 249	 * apply.
250	 */
251	HUGETLB_SHMFS_INODE     = 1,
252	/*
253	 * The file is being created on the internal vfs mount and shmfs
254	 * accounting rules do not apply
255	 */
256	HUGETLB_ANONHUGE_INODE  = 2,
257};
258
259#ifdef CONFIG_HUGETLBFS
260struct hugetlbfs_sb_info {
261	long	max_inodes;   /* inodes allowed */
262	long	free_inodes;  /* inodes free */
263	spinlock_t	stat_lock;
264	struct hstate *hstate;
265	struct hugepage_subpool *spool;
266	kuid_t	uid;
267	kgid_t	gid;
268	umode_t mode;
269};
270
271static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
272{
273	return sb->s_fs_info;
274}
275
276struct hugetlbfs_inode_info {
277	struct shared_policy policy;
278	struct inode vfs_inode;
279	unsigned int seals;
280};
281
282static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
283{
284	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
285}
286
287extern const struct file_operations hugetlbfs_file_operations;
288extern const struct vm_operations_struct hugetlb_vm_ops;
289struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
290				struct user_struct **user, int creat_flags,
291				int page_size_log);
292
293static inline bool is_file_hugepages(struct file *file)
294{
295	if (file->f_op == &hugetlbfs_file_operations)
296		return true;
297
298	return is_file_shm_hugepages(file);
299}
300
301
302#else /* !CONFIG_HUGETLBFS */
303
304#define is_file_hugepages(file)			false
305static inline struct file *
306hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
307		struct user_struct **user, int creat_flags,
308		int page_size_log)
309{
310	return ERR_PTR(-ENOSYS);
311}
312
313#endif /* !CONFIG_HUGETLBFS */
314
315#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
316unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
317					unsigned long len, unsigned long pgoff,
318					unsigned long flags);
319#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
320
321#ifdef CONFIG_HUGETLB_PAGE
322
323#define HSTATE_NAME_LEN 32
324/* Defines one hugetlb page size */
325struct hstate {
326	int next_nid_to_alloc;
327	int next_nid_to_free;
328	unsigned int order;
329	unsigned long mask;
330	unsigned long max_huge_pages;
331	unsigned long nr_huge_pages;
332	unsigned long free_huge_pages;
333	unsigned long resv_huge_pages;
334	unsigned long surplus_huge_pages;
335	unsigned long nr_overcommit_huge_pages;
336	struct list_head hugepage_activelist;
337	struct list_head hugepage_freelists[MAX_NUMNODES];
338	unsigned int nr_huge_pages_node[MAX_NUMNODES];
339	unsigned int free_huge_pages_node[MAX_NUMNODES];
340	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
341#ifdef CONFIG_CGROUP_HUGETLB
342	/* cgroup control files */
343	struct cftype cgroup_files[5];
344#endif
345	char name[HSTATE_NAME_LEN];
346};
347
348struct huge_bootmem_page {
349	struct list_head list;
350	struct hstate *hstate;
351#ifdef CONFIG_HIGHMEM
352	phys_addr_t phys;
353#endif
354};
355
356struct page *alloc_huge_page(struct vm_area_struct *vma,
357				unsigned long addr, int avoid_reserve);
358struct page *alloc_huge_page_node(struct hstate *h, int nid);
359struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
360				nodemask_t *nmask);
361struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
362				unsigned long address);
363int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
364			pgoff_t idx);
365
366/* arch callback */
367int __init __alloc_bootmem_huge_page(struct hstate *h);
368int __init alloc_bootmem_huge_page(struct hstate *h);
369
370void __init hugetlb_bad_size(void);
371void __init hugetlb_add_hstate(unsigned order);
372struct hstate *size_to_hstate(unsigned long size);
373
374#ifndef HUGE_MAX_HSTATE
375#define HUGE_MAX_HSTATE 1
376#endif
377
378extern struct hstate hstates[HUGE_MAX_HSTATE];
379extern unsigned int default_hstate_idx;
380
381#define default_hstate (hstates[default_hstate_idx])
382
383static inline struct hstate *hstate_inode(struct inode *i)
384{
385	return HUGETLBFS_SB(i->i_sb)->hstate;
386}
387
388static inline struct hstate *hstate_file(struct file *f)
389{
390	return hstate_inode(file_inode(f));
391}
392
393static inline struct hstate *hstate_sizelog(int page_size_log)
394{
395	if (!page_size_log)
396		return &default_hstate;
397
398	return size_to_hstate(1UL << page_size_log);
399}
400
401static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
402{
403	return hstate_file(vma->vm_file);
404}
405
406static inline unsigned long huge_page_size(struct hstate *h)
407{
408	return (unsigned long)PAGE_SIZE << h->order;
409}
410
411extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
412
413extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
414
415static inline unsigned long huge_page_mask(struct hstate *h)
416{
417	return h->mask;
418}
419
420static inline unsigned int huge_page_order(struct hstate *h)
421{
422	return h->order;
423}
424
425static inline unsigned huge_page_shift(struct hstate *h)
426{
427	return h->order + PAGE_SHIFT;
428}
429
430static inline bool hstate_is_gigantic(struct hstate *h)
431{
432	return huge_page_order(h) >= MAX_ORDER;
433}
434
435static inline unsigned int pages_per_huge_page(struct hstate *h)
436{
437	return 1 << h->order;
438}
439
440static inline unsigned int blocks_per_huge_page(struct hstate *h)
441{
442	return huge_page_size(h) / 512;
443}
444
445#include <asm/hugetlb.h>
446
447#ifndef arch_make_huge_pte
448static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
449				       struct page *page, int writable)
450{
451	return entry;
452}
453#endif
454
455static inline struct hstate *page_hstate(struct page *page)
456{
457	VM_BUG_ON_PAGE(!PageHuge(page), page);
458	return size_to_hstate(PAGE_SIZE << compound_order(page));
459}
460
461static inline unsigned hstate_index_to_shift(unsigned index)
462{
463	return hstates[index].order + PAGE_SHIFT;
464}
465
466static inline int hstate_index(struct hstate *h)
467{
468	return h - hstates;
469}
470
471pgoff_t __basepage_index(struct page *page);
472
473/* Return page->index in PAGE_SIZE units */
474static inline pgoff_t basepage_index(struct page *page)
475{
476	if (!PageCompound(page))
477		return page->index;
478
479	return __basepage_index(page);
480}
481
482extern int dissolve_free_huge_page(struct page *page);
483extern int dissolve_free_huge_pages(unsigned long start_pfn,
484				    unsigned long end_pfn);
485static inline bool hugepage_migration_supported(struct hstate *h)
486{
487#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
488	if ((huge_page_shift(h) == PMD_SHIFT) ||
489		(huge_page_shift(h) == PGDIR_SHIFT))
490		return true;
491	else
492		return false;
493#else
494	return false;
495#endif
496}
497
498static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
499					   struct mm_struct *mm, pte_t *pte)
500{
501	if (huge_page_size(h) == PMD_SIZE)
502		return pmd_lockptr(mm, (pmd_t *) pte);
503	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
504	return &mm->page_table_lock;
505}
506
507#ifndef hugepages_supported
508/*
 509 * Some platforms decide whether they support huge pages at boot
 510 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 511 * when there is no such support.
512 */
513#define hugepages_supported() (HPAGE_SHIFT != 0)
514#endif
515
516void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
517
518static inline void hugetlb_count_add(long l, struct mm_struct *mm)
519{
520	atomic_long_add(l, &mm->hugetlb_usage);
521}
522
523static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
524{
525	atomic_long_sub(l, &mm->hugetlb_usage);
526}
527
528#ifndef set_huge_swap_pte_at
529static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
530					pte_t *ptep, pte_t pte, unsigned long sz)
531{
532	set_huge_pte_at(mm, addr, ptep, pte);
533}
534#endif
535#else	/* CONFIG_HUGETLB_PAGE */
536struct hstate {};
537#define alloc_huge_page(v, a, r) NULL
538#define alloc_huge_page_node(h, nid) NULL
539#define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
540#define alloc_huge_page_vma(h, vma, address) NULL
541#define alloc_bootmem_huge_page(h) NULL
542#define hstate_file(f) NULL
543#define hstate_sizelog(s) NULL
544#define hstate_vma(v) NULL
545#define hstate_inode(i) NULL
546#define page_hstate(page) NULL
547#define huge_page_size(h) PAGE_SIZE
548#define huge_page_mask(h) PAGE_MASK
549#define vma_kernel_pagesize(v) PAGE_SIZE
550#define vma_mmu_pagesize(v) PAGE_SIZE
551#define huge_page_order(h) 0
552#define huge_page_shift(h) PAGE_SHIFT
553static inline bool hstate_is_gigantic(struct hstate *h)
554{
555	return false;
556}
557
558static inline unsigned int pages_per_huge_page(struct hstate *h)
559{
560	return 1;
561}
562
563static inline unsigned hstate_index_to_shift(unsigned index)
564{
565	return 0;
566}
567
568static inline int hstate_index(struct hstate *h)
569{
570	return 0;
571}
572
573static inline pgoff_t basepage_index(struct page *page)
574{
575	return page->index;
576}
577
578static inline int dissolve_free_huge_page(struct page *page)
579{
580	return 0;
581}
582
583static inline int dissolve_free_huge_pages(unsigned long start_pfn,
584					   unsigned long end_pfn)
585{
586	return 0;
587}
588
589static inline bool hugepage_migration_supported(struct hstate *h)
590{
591	return false;
592}
593
594static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
595					   struct mm_struct *mm, pte_t *pte)
596{
597	return &mm->page_table_lock;
598}
599
600static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
601{
602}
603
604static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
605{
606}
607
608static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
609					pte_t *ptep, pte_t pte, unsigned long sz)
610{
611}
612#endif	/* CONFIG_HUGETLB_PAGE */
613
614static inline spinlock_t *huge_pte_lock(struct hstate *h,
615					struct mm_struct *mm, pte_t *pte)
616{
617	spinlock_t *ptl;
618
619	ptl = huge_pte_lockptr(h, mm, pte);
620	spin_lock(ptl);
621	return ptl;
622}
623
624#endif /* _LINUX_HUGETLB_H */