v5.14.15
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_HUGETLB_H
   3#define _LINUX_HUGETLB_H
   4
   5#include <linux/mm_types.h>
   6#include <linux/mmdebug.h>
   7#include <linux/fs.h>
   8#include <linux/hugetlb_inline.h>
   9#include <linux/cgroup.h>
  10#include <linux/list.h>
  11#include <linux/kref.h>
  12#include <linux/pgtable.h>
  13#include <linux/gfp.h>
  14#include <linux/userfaultfd_k.h>
  15
  16struct ctl_table;
  17struct user_struct;
  18struct mmu_gather;
  19
  20#ifndef is_hugepd
  21typedef struct { unsigned long pd; } hugepd_t;
  22#define is_hugepd(hugepd) (0)
  23#define __hugepd(x) ((hugepd_t) { (x) })
  24#endif
  25
  26#ifdef CONFIG_HUGETLB_PAGE
  27
  28#include <linux/mempolicy.h>
  29#include <linux/shm.h>
  30#include <asm/tlbflush.h>
  31
  32/*
   33 * For a HugeTLB page, there is more metadata to save in the struct page. But
   34 * the head struct page cannot meet our needs, so we have to abuse other tail
   35 * struct pages to store the metadata. In order to avoid conflicts caused by
   36 * subsequent use of more tail struct pages, we gather these discrete indexes
   37 * of the tail struct pages here.
  38 */
  39enum {
  40	SUBPAGE_INDEX_SUBPOOL = 1,	/* reuse page->private */
  41#ifdef CONFIG_CGROUP_HUGETLB
  42	SUBPAGE_INDEX_CGROUP,		/* reuse page->private */
  43	SUBPAGE_INDEX_CGROUP_RSVD,	/* reuse page->private */
  44	__MAX_CGROUP_SUBPAGE_INDEX = SUBPAGE_INDEX_CGROUP_RSVD,
  45#endif
  46	__NR_USED_SUBPAGE,
  47};
  48
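As a brief illustration of how these indexes are consumed (the real accessors, hugetlb_page_subpool() and hugetlb_set_page_subpool(), appear further down this header), a minimal sketch with a hypothetical helper name:

/* Illustrative sketch only, not part of the kernel header. */
static inline unsigned long example_read_subpage_private(struct page *head)
{
	/* the metadata lives in ->private of the tail page picked by the index */
	return page_private(head + SUBPAGE_INDEX_SUBPOOL);
}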
  49struct hugepage_subpool {
  50	spinlock_t lock;
  51	long count;
  52	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
  53	long used_hpages;	/* Used count against maximum, includes */
  54				/* both allocated and reserved pages. */
  55	struct hstate *hstate;
  56	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
  57	long rsv_hpages;	/* Pages reserved against global pool to */
  58				/* satisfy minimum size. */
  59};
  60
  61struct resv_map {
  62	struct kref refs;
  63	spinlock_t lock;
  64	struct list_head regions;
  65	long adds_in_progress;
  66	struct list_head region_cache;
  67	long region_cache_count;
  68#ifdef CONFIG_CGROUP_HUGETLB
  69	/*
  70	 * On private mappings, the counter to uncharge reservations is stored
  71	 * here. If these fields are 0, then either the mapping is shared, or
  72	 * cgroup accounting is disabled for this resv_map.
  73	 */
  74	struct page_counter *reservation_counter;
  75	unsigned long pages_per_hpage;
  76	struct cgroup_subsys_state *css;
  77#endif
  78};
  79
  80/*
  81 * Region tracking -- allows tracking of reservations and instantiated pages
  82 *                    across the pages in a mapping.
  83 *
  84 * The region data structures are embedded into a resv_map and protected
  85 * by a resv_map's lock.  The set of regions within the resv_map represent
  86 * reservations for huge pages, or huge pages that have already been
  87 * instantiated within the map.  The from and to elements are huge page
  88 * indices into the associated mapping.  from indicates the starting index
  89 * of the region.  to represents the first index past the end of  the region.
  90 *
  91 * For example, a file region structure with from == 0 and to == 4 represents
  92 * four huge pages in a mapping.  It is important to note that the to element
  93 * represents the first element past the end of the region. This is used in
  94 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
  95 *
  96 * Interval notation of the form [from, to) will be used to indicate that
  97 * the endpoint from is inclusive and to is exclusive.
  98 */
  99struct file_region {
 100	struct list_head link;
 101	long from;
 102	long to;
 103#ifdef CONFIG_CGROUP_HUGETLB
 104	/*
 105	 * On shared mappings, each reserved region appears as a struct
 106	 * file_region in resv_map. These fields hold the info needed to
 107	 * uncharge each reservation.
 108	 */
 109	struct page_counter *reservation_counter;
 110	struct cgroup_subsys_state *css;
 111#endif
 112};
 113
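The [from, to) convention described above makes region arithmetic simple; a minimal sketch (hypothetical helper name, not part of the header):

static inline long example_region_npages(struct file_region *rg)
{
	/* e.g. [0, 4) covers 4 huge pages: 4 - 0 = 4 */
	return rg->to - rg->from;
}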
 114extern struct resv_map *resv_map_alloc(void);
 115void resv_map_release(struct kref *ref);
 116
 117extern spinlock_t hugetlb_lock;
 118extern int hugetlb_max_hstate __read_mostly;
 119#define for_each_hstate(h) \
 120	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
 121
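A hedged usage sketch of for_each_hstate() (hypothetical helper name, not part of the header):

static inline int example_count_hstates(void)
{
	struct hstate *h;
	int n = 0;

	for_each_hstate(h)	/* one iteration per registered huge page size */
		n++;
	return n;
}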
 122struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
 123						long min_hpages);
 124void hugepage_put_subpool(struct hugepage_subpool *spool);
 125
 126void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
 127int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
 128int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
 129		loff_t *);
 130int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
 131		loff_t *);
 132int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
 133		loff_t *);
 134
 135int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 136long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
 137			 struct page **, struct vm_area_struct **,
 138			 unsigned long *, unsigned long *, long, unsigned int,
 139			 int *);
 140void unmap_hugepage_range(struct vm_area_struct *,
 141			  unsigned long, unsigned long, struct page *);
 142void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 143			  struct vm_area_struct *vma,
 144			  unsigned long start, unsigned long end,
 145			  struct page *ref_page);
 146void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 147				unsigned long start, unsigned long end,
 148				struct page *ref_page);
 149void hugetlb_report_meminfo(struct seq_file *);
 150int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 151void hugetlb_show_meminfo(void);
 152unsigned long hugetlb_total_pages(void);
 153vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 154			unsigned long address, unsigned int flags);
 155#ifdef CONFIG_USERFAULTFD
 156int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
 157				struct vm_area_struct *dst_vma,
 158				unsigned long dst_addr,
 159				unsigned long src_addr,
 160				enum mcopy_atomic_mode mode,
 161				struct page **pagep);
 162#endif /* CONFIG_USERFAULTFD */
 163bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 164						struct vm_area_struct *vma,
 165						vm_flags_t vm_flags);
 166long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 167						long freed);
 168bool isolate_huge_page(struct page *page, struct list_head *list);
 169int get_hwpoison_huge_page(struct page *page, bool *hugetlb);
 170void putback_active_hugepage(struct page *page);
 171void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
 172void free_huge_page(struct page *page);
 173void hugetlb_fix_reserve_counts(struct inode *inode);
 174extern struct mutex *hugetlb_fault_mutex_table;
 175u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 176
 177pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 178		      unsigned long addr, pud_t *pud);
 179
 180struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 181
 182extern int sysctl_hugetlb_shm_group;
 183extern struct list_head huge_boot_pages;
 184
 185/* arch callbacks */
 186
 187pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 188			unsigned long addr, unsigned long sz);
 189pte_t *huge_pte_offset(struct mm_struct *mm,
 190		       unsigned long addr, unsigned long sz);
 191int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 192				unsigned long *addr, pte_t *ptep);
 193void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 194				unsigned long *start, unsigned long *end);
 195struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 196			      int write);
 197struct page *follow_huge_pd(struct vm_area_struct *vma,
 198			    unsigned long address, hugepd_t hpd,
 199			    int flags, int pdshift);
 200struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
 201				pmd_t *pmd, int flags);
 202struct page *follow_huge_pud(struct mm_struct *mm, unsigned long address,
 203				pud_t *pud, int flags);
 204struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
 205			     pgd_t *pgd, int flags);
 206
 207int pmd_huge(pmd_t pmd);
 208int pud_huge(pud_t pud);
 209unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 210		unsigned long address, unsigned long end, pgprot_t newprot);
 211
 212bool is_hugetlb_entry_migration(pte_t pte);
 213void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
 214
 215#else /* !CONFIG_HUGETLB_PAGE */
 216
 217static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 218{
 219}
 220
 221static inline unsigned long hugetlb_total_pages(void)
 222{
 223	return 0;
 224}
 225
 226static inline struct address_space *hugetlb_page_mapping_lock_write(
 227							struct page *hpage)
 228{
 229	return NULL;
 230}
 231
 232static inline int huge_pmd_unshare(struct mm_struct *mm,
 233					struct vm_area_struct *vma,
 234					unsigned long *addr, pte_t *ptep)
 235{
 236	return 0;
 237}
 238
 239static inline void adjust_range_if_pmd_sharing_possible(
 240				struct vm_area_struct *vma,
 241				unsigned long *start, unsigned long *end)
 242{
 243}
 244
 245static inline long follow_hugetlb_page(struct mm_struct *mm,
 246			struct vm_area_struct *vma, struct page **pages,
 247			struct vm_area_struct **vmas, unsigned long *position,
 248			unsigned long *nr_pages, long i, unsigned int flags,
 249			int *nonblocking)
 250{
 251	BUG();
 252	return 0;
 253}
 254
 255static inline struct page *follow_huge_addr(struct mm_struct *mm,
 256					unsigned long address, int write)
 257{
 258	return ERR_PTR(-EINVAL);
 259}
 260
 261static inline int copy_hugetlb_page_range(struct mm_struct *dst,
 262			struct mm_struct *src, struct vm_area_struct *vma)
 263{
 264	BUG();
 265	return 0;
 266}
 267
 268static inline void hugetlb_report_meminfo(struct seq_file *m)
 269{
 270}
 271
 272static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
 273{
 274	return 0;
 275}
 276
 277static inline void hugetlb_show_meminfo(void)
 278{
 279}
 280
 281static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
 282				unsigned long address, hugepd_t hpd, int flags,
 283				int pdshift)
 284{
 285	return NULL;
 286}
 287
 288static inline struct page *follow_huge_pmd(struct mm_struct *mm,
 289				unsigned long address, pmd_t *pmd, int flags)
 290{
 291	return NULL;
 292}
 293
 294static inline struct page *follow_huge_pud(struct mm_struct *mm,
 295				unsigned long address, pud_t *pud, int flags)
 296{
 297	return NULL;
 298}
 299
 300static inline struct page *follow_huge_pgd(struct mm_struct *mm,
 301				unsigned long address, pgd_t *pgd, int flags)
 302{
 303	return NULL;
 304}
 305
 306static inline int prepare_hugepage_range(struct file *file,
 307				unsigned long addr, unsigned long len)
 308{
 309	return -EINVAL;
 310}
 311
 312static inline int pmd_huge(pmd_t pmd)
 313{
 314	return 0;
 315}
 316
 317static inline int pud_huge(pud_t pud)
 318{
 319	return 0;
 320}
 321
 322static inline int is_hugepage_only_range(struct mm_struct *mm,
 323					unsigned long addr, unsigned long len)
 324{
 325	return 0;
 326}
 327
 328static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 329				unsigned long addr, unsigned long end,
 330				unsigned long floor, unsigned long ceiling)
 331{
 332	BUG();
 333}
 334
 335#ifdef CONFIG_USERFAULTFD
 336static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 337						pte_t *dst_pte,
 338						struct vm_area_struct *dst_vma,
 339						unsigned long dst_addr,
 340						unsigned long src_addr,
 341						enum mcopy_atomic_mode mode,
 342						struct page **pagep)
 343{
 344	BUG();
 345	return 0;
 346}
 347#endif /* CONFIG_USERFAULTFD */
 348
 349static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 350					unsigned long sz)
 351{
 352	return NULL;
 353}
 354
 355static inline bool isolate_huge_page(struct page *page, struct list_head *list)
 356{
 357	return false;
 358}
 359
 360static inline int get_hwpoison_huge_page(struct page *page, bool *hugetlb)
 361{
 362	return 0;
 363}
 364
 365static inline void putback_active_hugepage(struct page *page)
 366{
 367}
 368
 369static inline void move_hugetlb_state(struct page *oldpage,
 370					struct page *newpage, int reason)
 371{
 372}
 373
 374static inline unsigned long hugetlb_change_protection(
 375			struct vm_area_struct *vma, unsigned long address,
 376			unsigned long end, pgprot_t newprot)
 377{
 378	return 0;
 379}
 380
 381static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
 382			struct vm_area_struct *vma, unsigned long start,
 383			unsigned long end, struct page *ref_page)
 384{
 385	BUG();
 386}
 387
 388static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 389			struct vm_area_struct *vma, unsigned long start,
 390			unsigned long end, struct page *ref_page)
 391{
 392	BUG();
 393}
 394
 395static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 396			struct vm_area_struct *vma, unsigned long address,
 397			unsigned int flags)
 398{
 399	BUG();
 400	return 0;
 401}
 402
 403static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 404
 405#endif /* !CONFIG_HUGETLB_PAGE */
 406/*
  407 * hugepages at the page global directory. If the arch supports
  408 * hugepages at the pgd level, it needs to define this.
 409 */
 410#ifndef pgd_huge
 411#define pgd_huge(x)	0
 412#endif
 413#ifndef p4d_huge
 414#define p4d_huge(x)	0
 415#endif
 416
 417#ifndef pgd_write
 418static inline int pgd_write(pgd_t pgd)
 419{
 420	BUG();
 421	return 0;
 422}
 423#endif
 424
 425#define HUGETLB_ANON_FILE "anon_hugepage"
 426
 427enum {
 428	/*
 429	 * The file will be used as an shm file so shmfs accounting rules
 430	 * apply
 431	 */
 432	HUGETLB_SHMFS_INODE     = 1,
 433	/*
 434	 * The file is being created on the internal vfs mount and shmfs
 435	 * accounting rules do not apply
 436	 */
 437	HUGETLB_ANONHUGE_INODE  = 2,
 438};
 439
 440#ifdef CONFIG_HUGETLBFS
 441struct hugetlbfs_sb_info {
 442	long	max_inodes;   /* inodes allowed */
 443	long	free_inodes;  /* inodes free */
 444	spinlock_t	stat_lock;
 445	struct hstate *hstate;
 446	struct hugepage_subpool *spool;
 447	kuid_t	uid;
 448	kgid_t	gid;
 449	umode_t mode;
 450};
 451
 452static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 453{
 454	return sb->s_fs_info;
 455}
 456
 457struct hugetlbfs_inode_info {
 458	struct shared_policy policy;
 459	struct inode vfs_inode;
 460	unsigned int seals;
 461};
 462
 463static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
 464{
 465	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
 466}
 467
 468extern const struct file_operations hugetlbfs_file_operations;
 469extern const struct vm_operations_struct hugetlb_vm_ops;
 470struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
 471				struct ucounts **ucounts, int creat_flags,
 472				int page_size_log);
 473
 474static inline bool is_file_hugepages(struct file *file)
 475{
 476	if (file->f_op == &hugetlbfs_file_operations)
 477		return true;
 478
 479	return is_file_shm_hugepages(file);
 480}
 481
 482static inline struct hstate *hstate_inode(struct inode *i)
 483{
 484	return HUGETLBFS_SB(i->i_sb)->hstate;
 485}
 486#else /* !CONFIG_HUGETLBFS */
 487
 488#define is_file_hugepages(file)			false
 489static inline struct file *
 490hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
 491		struct ucounts **ucounts, int creat_flags,
 492		int page_size_log)
 493{
 494	return ERR_PTR(-ENOSYS);
 495}
 496
 497static inline struct hstate *hstate_inode(struct inode *i)
 498{
 499	return NULL;
 500}
 501#endif /* !CONFIG_HUGETLBFS */
 502
 503#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 504unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 505					unsigned long len, unsigned long pgoff,
 506					unsigned long flags);
 507#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
 508
 509/*
  510 * hugetlb page specific state flags.  These flags are located in page.private
 511 * of the hugetlb head page.  Functions created via the below macros should be
 512 * used to manipulate these flags.
 513 *
 514 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 515 *	allocation time.  Cleared when page is fully instantiated.  Free
 516 *	routine checks flag to restore a reservation on error paths.
 517 *	Synchronization:  Examined or modified by code that knows it has
 518 *	the only reference to page.  i.e. After allocation but before use
 519 *	or when the page is being freed.
 520 * HPG_migratable  - Set after a newly allocated page is added to the page
 521 *	cache and/or page tables.  Indicates the page is a candidate for
 522 *	migration.
 523 *	Synchronization:  Initially set after new page allocation with no
 524 *	locking.  When examined and modified during migration processing
 525 *	(isolate, migrate, putback) the hugetlb_lock is held.
  526 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 527 *	allocator.  Typically used for migration target pages when no pages
 528 *	are available in the pool.  The hugetlb free page path will
 529 *	immediately free pages with this flag set to the buddy allocator.
 530 *	Synchronization: Can be set after huge page allocation from buddy when
  531 *	code knows it has the only reference.  All other examinations and
 532 *	modifications require hugetlb_lock.
 533 * HPG_freed - Set when page is on the free lists.
 534 *	Synchronization: hugetlb_lock held for examination and modification.
 535 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 536 */
 537enum hugetlb_page_flags {
 538	HPG_restore_reserve = 0,
 539	HPG_migratable,
 540	HPG_temporary,
 541	HPG_freed,
 542	HPG_vmemmap_optimized,
 543	__NR_HPAGEFLAGS,
 544};
 545
 546/*
 547 * Macros to create test, set and clear function definitions for
 548 * hugetlb specific page flags.
 549 */
 550#ifdef CONFIG_HUGETLB_PAGE
 551#define TESTHPAGEFLAG(uname, flname)				\
 552static inline int HPage##uname(struct page *page)		\
 553	{ return test_bit(HPG_##flname, &(page->private)); }
 554
 555#define SETHPAGEFLAG(uname, flname)				\
 556static inline void SetHPage##uname(struct page *page)		\
 557	{ set_bit(HPG_##flname, &(page->private)); }
 558
 559#define CLEARHPAGEFLAG(uname, flname)				\
 560static inline void ClearHPage##uname(struct page *page)		\
 561	{ clear_bit(HPG_##flname, &(page->private)); }
 562#else
 563#define TESTHPAGEFLAG(uname, flname)				\
 564static inline int HPage##uname(struct page *page)		\
 565	{ return 0; }
 566
 567#define SETHPAGEFLAG(uname, flname)				\
 568static inline void SetHPage##uname(struct page *page)		\
 569	{ }
 570
 571#define CLEARHPAGEFLAG(uname, flname)				\
 572static inline void ClearHPage##uname(struct page *page)		\
 573	{ }
 574#endif
 575
 576#define HPAGEFLAG(uname, flname)				\
 577	TESTHPAGEFLAG(uname, flname)				\
 578	SETHPAGEFLAG(uname, flname)				\
 579	CLEARHPAGEFLAG(uname, flname)				\
 580
 581/*
 582 * Create functions associated with hugetlb page flags
 583 */
 584HPAGEFLAG(RestoreReserve, restore_reserve)
 585HPAGEFLAG(Migratable, migratable)
 586HPAGEFLAG(Temporary, temporary)
 587HPAGEFLAG(Freed, freed)
 588HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
 589
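A hedged sketch of how the generated helpers are used; HPageMigratable() and SetHPageMigratable() come from the HPAGEFLAG(Migratable, migratable) line above, while the wrapper name is hypothetical:

static inline void example_mark_migratable(struct page *hpage)
{
	if (!HPageMigratable(hpage))
		SetHPageMigratable(hpage);	/* sets HPG_migratable in page->private */
}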
 590#ifdef CONFIG_HUGETLB_PAGE
 591
 592#define HSTATE_NAME_LEN 32
 593/* Defines one hugetlb page size */
 594struct hstate {
 595	struct mutex resize_lock;
 596	int next_nid_to_alloc;
 597	int next_nid_to_free;
 598	unsigned int order;
 599	unsigned long mask;
 600	unsigned long max_huge_pages;
 601	unsigned long nr_huge_pages;
 602	unsigned long free_huge_pages;
 603	unsigned long resv_huge_pages;
 604	unsigned long surplus_huge_pages;
 605	unsigned long nr_overcommit_huge_pages;
 606	struct list_head hugepage_activelist;
 607	struct list_head hugepage_freelists[MAX_NUMNODES];
 608	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 609	unsigned int free_huge_pages_node[MAX_NUMNODES];
 610	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 611#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
 612	unsigned int nr_free_vmemmap_pages;
 613#endif
 614#ifdef CONFIG_CGROUP_HUGETLB
 615	/* cgroup control files */
 616	struct cftype cgroup_files_dfl[7];
 617	struct cftype cgroup_files_legacy[9];
 618#endif
 619	char name[HSTATE_NAME_LEN];
 620};
 621
 622struct huge_bootmem_page {
 623	struct list_head list;
 624	struct hstate *hstate;
 625};
 626
 627int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 628struct page *alloc_huge_page(struct vm_area_struct *vma,
 629				unsigned long addr, int avoid_reserve);
 630struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 631				nodemask_t *nmask, gfp_t gfp_mask);
 632struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
 633				unsigned long address);
 634int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 635			pgoff_t idx);
 636void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 637				unsigned long address, struct page *page);
 638
 639/* arch callback */
 640int __init __alloc_bootmem_huge_page(struct hstate *h);
 641int __init alloc_bootmem_huge_page(struct hstate *h);
 642
 643void __init hugetlb_add_hstate(unsigned order);
 644bool __init arch_hugetlb_valid_size(unsigned long size);
 645struct hstate *size_to_hstate(unsigned long size);
 646
 647#ifndef HUGE_MAX_HSTATE
 648#define HUGE_MAX_HSTATE 1
 649#endif
 650
 651extern struct hstate hstates[HUGE_MAX_HSTATE];
 652extern unsigned int default_hstate_idx;
 653
 654#define default_hstate (hstates[default_hstate_idx])
 655
 656/*
 657 * hugetlb page subpool pointer located in hpage[1].private
 658 */
 659static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
 660{
 661	return (void *)page_private(hpage + SUBPAGE_INDEX_SUBPOOL);
 662}
 663
 664static inline void hugetlb_set_page_subpool(struct page *hpage,
 665					struct hugepage_subpool *subpool)
 666{
 667	set_page_private(hpage + SUBPAGE_INDEX_SUBPOOL, (unsigned long)subpool);
 668}
 669
 670static inline struct hstate *hstate_file(struct file *f)
 671{
 672	return hstate_inode(file_inode(f));
 673}
 674
 675static inline struct hstate *hstate_sizelog(int page_size_log)
 676{
 677	if (!page_size_log)
 678		return &default_hstate;
 679
 680	return size_to_hstate(1UL << page_size_log);
 681}
 682
 683static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
 684{
 685	return hstate_file(vma->vm_file);
 686}
 687
 688static inline unsigned long huge_page_size(struct hstate *h)
 689{
 690	return (unsigned long)PAGE_SIZE << h->order;
 691}
 692
 693extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
 694
 695extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
 696
 697static inline unsigned long huge_page_mask(struct hstate *h)
 698{
 699	return h->mask;
 700}
 701
 702static inline unsigned int huge_page_order(struct hstate *h)
 703{
 704	return h->order;
 705}
 706
 707static inline unsigned huge_page_shift(struct hstate *h)
 708{
 709	return h->order + PAGE_SHIFT;
 710}
 711
 712static inline bool hstate_is_gigantic(struct hstate *h)
 713{
 714	return huge_page_order(h) >= MAX_ORDER;
 715}
 716
 717static inline unsigned int pages_per_huge_page(struct hstate *h)
 718{
 719	return 1 << h->order;
 720}
 721
 722static inline unsigned int blocks_per_huge_page(struct hstate *h)
 723{
 724	return huge_page_size(h) / 512;
 725}
 726
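A short worked example for these helpers, assuming 4 KiB base pages (PAGE_SHIFT = 12) and a 2 MiB hstate (order = 9):

/*
 * Illustrative arithmetic only, not part of the header:
 *   huge_page_size(h)       = 4096 << 9   = 2 MiB
 *   huge_page_shift(h)      = 9 + 12      = 21
 *   pages_per_huge_page(h)  = 1 << 9      = 512
 *   blocks_per_huge_page(h) = 2 MiB / 512 = 4096 (512-byte blocks)
 */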
 727#include <asm/hugetlb.h>
 728
 729#ifndef is_hugepage_only_range
 730static inline int is_hugepage_only_range(struct mm_struct *mm,
 731					unsigned long addr, unsigned long len)
 732{
 733	return 0;
 734}
 735#define is_hugepage_only_range is_hugepage_only_range
 736#endif
 737
 738#ifndef arch_clear_hugepage_flags
 739static inline void arch_clear_hugepage_flags(struct page *page) { }
 740#define arch_clear_hugepage_flags arch_clear_hugepage_flags
 741#endif
 742
 743#ifndef arch_make_huge_pte
 744static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
 745				       vm_flags_t flags)
 746{
 747	return entry;
 748}
 749#endif
 750
 751static inline struct hstate *page_hstate(struct page *page)
 752{
 753	VM_BUG_ON_PAGE(!PageHuge(page), page);
 754	return size_to_hstate(page_size(page));
 755}
 756
 757static inline unsigned hstate_index_to_shift(unsigned index)
 758{
 759	return hstates[index].order + PAGE_SHIFT;
 760}
 761
 762static inline int hstate_index(struct hstate *h)
 763{
 764	return h - hstates;
 765}
 766
 767extern int dissolve_free_huge_page(struct page *page);
 768extern int dissolve_free_huge_pages(unsigned long start_pfn,
 769				    unsigned long end_pfn);
 770
 771#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 772#ifndef arch_hugetlb_migration_supported
 773static inline bool arch_hugetlb_migration_supported(struct hstate *h)
 774{
 775	if ((huge_page_shift(h) == PMD_SHIFT) ||
 776		(huge_page_shift(h) == PUD_SHIFT) ||
 777			(huge_page_shift(h) == PGDIR_SHIFT))
 778		return true;
 779	else
 780		return false;
 781}
 782#endif
 783#else
 784static inline bool arch_hugetlb_migration_supported(struct hstate *h)
 785{
 786	return false;
 787}
 788#endif
 789
 790static inline bool hugepage_migration_supported(struct hstate *h)
 791{
 792	return arch_hugetlb_migration_supported(h);
 793}
 794
 795/*
  796 * The movability check is different from the migration check.
  797 * It determines whether or not a huge page should be placed in
  798 * the movable zone. Movability of any huge page should be
  799 * required only if the huge page size is supported for migration.
  800 * There won't be any reason for the huge page to be movable if
  801 * it is not migratable to start with. Also the size of the huge
  802 * page should be large enough to be placed under a movable zone
  803 * and still feasible enough to be migratable. Just the presence
  804 * in a movable zone does not make the migration feasible.
  805 *
  806 * So even though large huge page sizes like the gigantic ones
  807 * are migratable, they should not be movable because it's not
  808 * feasible to migrate them from the movable zone.
 809 */
 810static inline bool hugepage_movable_supported(struct hstate *h)
 811{
 812	if (!hugepage_migration_supported(h))
 813		return false;
 814
 815	if (hstate_is_gigantic(h))
 816		return false;
 817	return true;
 818}
 819
 820/* Movability of hugepages depends on migration support. */
 821static inline gfp_t htlb_alloc_mask(struct hstate *h)
 822{
 823	if (hugepage_movable_supported(h))
 824		return GFP_HIGHUSER_MOVABLE;
 825	else
 826		return GFP_HIGHUSER;
 827}
 828
 829static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 830{
 831	gfp_t modified_mask = htlb_alloc_mask(h);
 832
 833	/* Some callers might want to enforce node */
 834	modified_mask |= (gfp_mask & __GFP_THISNODE);
 835
 836	modified_mask |= (gfp_mask & __GFP_NOWARN);
 837
 838	return modified_mask;
 839}
 840
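A hedged usage sketch of htlb_modify_alloc_mask() (hypothetical helper name, not part of the header):

static inline gfp_t example_thisnode_gfp(struct hstate *h)
{
	/* GFP_HIGHUSER(_MOVABLE) from htlb_alloc_mask(), plus __GFP_THISNODE */
	return htlb_modify_alloc_mask(h, __GFP_THISNODE);
}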
 841static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 842					   struct mm_struct *mm, pte_t *pte)
 843{
 844	if (huge_page_size(h) == PMD_SIZE)
 845		return pmd_lockptr(mm, (pmd_t *) pte);
 846	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
 847	return &mm->page_table_lock;
 848}
 849
 850#ifndef hugepages_supported
 851/*
  852 * Some platforms decide whether they support huge pages at boot
  853 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
  854 * when there is no such support.
 855 */
 856#define hugepages_supported() (HPAGE_SHIFT != 0)
 857#endif
 858
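A hedged sketch of the usual guard on platforms that may boot without huge page support (hypothetical helper name, not part of the header):

static inline bool example_can_use_hugepages(void)
{
	/* e.g. powerpc boots with HPAGE_SHIFT == 0 when there is no support */
	return hugepages_supported();
}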
 859void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
 860
 861static inline void hugetlb_count_init(struct mm_struct *mm)
 862{
 863	atomic_long_set(&mm->hugetlb_usage, 0);
 864}
 865
 866static inline void hugetlb_count_add(long l, struct mm_struct *mm)
 867{
 868	atomic_long_add(l, &mm->hugetlb_usage);
 869}
 870
 871static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
 872{
 873	atomic_long_sub(l, &mm->hugetlb_usage);
 874}
 875
 876#ifndef set_huge_swap_pte_at
 877static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 878					pte_t *ptep, pte_t pte, unsigned long sz)
 879{
 880	set_huge_pte_at(mm, addr, ptep, pte);
 881}
 882#endif
 883
 884#ifndef huge_ptep_modify_prot_start
 885#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
 886static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
 887						unsigned long addr, pte_t *ptep)
 888{
 889	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
 890}
 891#endif
 892
 893#ifndef huge_ptep_modify_prot_commit
 894#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
 895static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
 896						unsigned long addr, pte_t *ptep,
 897						pte_t old_pte, pte_t pte)
 898{
 899	set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
 900}
 901#endif
 902
 903#else	/* CONFIG_HUGETLB_PAGE */
 904struct hstate {};
 905
 906static inline struct hugepage_subpool *hugetlb_page_subpool(struct page *hpage)
 907{
 908	return NULL;
 909}
 910
 911static inline int isolate_or_dissolve_huge_page(struct page *page,
 912						struct list_head *list)
 913{
 914	return -ENOMEM;
 915}
 916
 917static inline struct page *alloc_huge_page(struct vm_area_struct *vma,
 918					   unsigned long addr,
 919					   int avoid_reserve)
 920{
 921	return NULL;
 922}
 923
 924static inline struct page *
 925alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 926			nodemask_t *nmask, gfp_t gfp_mask)
 927{
 928	return NULL;
 929}
 930
 931static inline struct page *alloc_huge_page_vma(struct hstate *h,
 932					       struct vm_area_struct *vma,
 933					       unsigned long address)
 934{
 935	return NULL;
 936}
 937
 938static inline int __alloc_bootmem_huge_page(struct hstate *h)
 939{
 940	return 0;
 941}
 942
 943static inline struct hstate *hstate_file(struct file *f)
 944{
 945	return NULL;
 946}
 947
 948static inline struct hstate *hstate_sizelog(int page_size_log)
 949{
 950	return NULL;
 951}
 952
 953static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
 954{
 955	return NULL;
 956}
 957
 958static inline struct hstate *page_hstate(struct page *page)
 959{
 960	return NULL;
 961}
 962
 963static inline unsigned long huge_page_size(struct hstate *h)
 964{
 965	return PAGE_SIZE;
 966}
 967
 968static inline unsigned long huge_page_mask(struct hstate *h)
 969{
 970	return PAGE_MASK;
 971}
 972
 973static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
 974{
 975	return PAGE_SIZE;
 976}
 977
 978static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
 979{
 980	return PAGE_SIZE;
 981}
 982
 983static inline unsigned int huge_page_order(struct hstate *h)
 984{
 985	return 0;
 986}
 987
 988static inline unsigned int huge_page_shift(struct hstate *h)
 989{
 990	return PAGE_SHIFT;
 991}
 992
 993static inline bool hstate_is_gigantic(struct hstate *h)
 994{
 995	return false;
 996}
 997
 998static inline unsigned int pages_per_huge_page(struct hstate *h)
 999{
1000	return 1;
1001}
1002
1003static inline unsigned hstate_index_to_shift(unsigned index)
1004{
1005	return 0;
1006}
1007
1008static inline int hstate_index(struct hstate *h)
1009{
1010	return 0;
1011}
1012
1013static inline int dissolve_free_huge_page(struct page *page)
1014{
1015	return 0;
1016}
1017
1018static inline int dissolve_free_huge_pages(unsigned long start_pfn,
1019					   unsigned long end_pfn)
1020{
1021	return 0;
1022}
1023
1024static inline bool hugepage_migration_supported(struct hstate *h)
1025{
1026	return false;
1027}
1028
1029static inline bool hugepage_movable_supported(struct hstate *h)
1030{
1031	return false;
1032}
1033
1034static inline gfp_t htlb_alloc_mask(struct hstate *h)
1035{
1036	return 0;
1037}
1038
1039static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
1040{
1041	return 0;
1042}
1043
1044static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
1045					   struct mm_struct *mm, pte_t *pte)
1046{
1047	return &mm->page_table_lock;
1048}
1049
1050static inline void hugetlb_count_init(struct mm_struct *mm)
1051{
1052}
1053
1054static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
1055{
1056}
1057
1058static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
1059{
1060}
1061
1062static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
1063					pte_t *ptep, pte_t pte, unsigned long sz)
1064{
1065}
1066#endif	/* CONFIG_HUGETLB_PAGE */
1067
1068#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
1069extern bool hugetlb_free_vmemmap_enabled;
1070#else
1071#define hugetlb_free_vmemmap_enabled	false
1072#endif
1073
1074static inline spinlock_t *huge_pte_lock(struct hstate *h,
1075					struct mm_struct *mm, pte_t *pte)
1076{
1077	spinlock_t *ptl;
1078
1079	ptl = huge_pte_lockptr(h, mm, pte);
1080	spin_lock(ptl);
1081	return ptl;
1082}
1083
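A hedged sketch of the usual pattern around huge_pte_lock() (hypothetical helper name, not part of the header):

static inline void example_with_huge_pte_locked(struct hstate *h,
						struct mm_struct *mm, pte_t *ptep)
{
	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);

	/* ... read or update *ptep while the page table lock is held ... */
	spin_unlock(ptl);
}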
1084#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
1085extern void __init hugetlb_cma_reserve(int order);
1086extern void __init hugetlb_cma_check(void);
1087#else
1088static inline __init void hugetlb_cma_reserve(int order)
1089{
1090}
1091static inline __init void hugetlb_cma_check(void)
1092{
1093}
1094#endif
1095
1096bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
1097
1098#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
1099/*
1100 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
1101 * implement this.
1102 */
1103#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1104#endif
1105
1106#endif /* _LINUX_HUGETLB_H */
v6.9.4
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _LINUX_HUGETLB_H
   3#define _LINUX_HUGETLB_H
   4
   5#include <linux/mm.h>
   6#include <linux/mm_types.h>
   7#include <linux/mmdebug.h>
   8#include <linux/fs.h>
   9#include <linux/hugetlb_inline.h>
  10#include <linux/cgroup.h>
  11#include <linux/page_ref.h>
  12#include <linux/list.h>
  13#include <linux/kref.h>
  14#include <linux/pgtable.h>
  15#include <linux/gfp.h>
  16#include <linux/userfaultfd_k.h>
  17
  18struct ctl_table;
  19struct user_struct;
  20struct mmu_gather;
  21struct node;
  22
  23#ifndef CONFIG_ARCH_HAS_HUGEPD
  24typedef struct { unsigned long pd; } hugepd_t;
  25#define is_hugepd(hugepd) (0)
  26#define __hugepd(x) ((hugepd_t) { (x) })
  27#endif
  28
  29void free_huge_folio(struct folio *folio);
  30
  31#ifdef CONFIG_HUGETLB_PAGE
  32
  33#include <linux/pagemap.h>
  34#include <linux/shm.h>
  35#include <asm/tlbflush.h>
  36
  37/*
   38 * For a HugeTLB page, there is more metadata to save in the struct page. But
   39 * the head struct page cannot meet our needs, so we have to abuse other tail
   40 * struct pages to store the metadata.
  41 */
  42#define __NR_USED_SUBPAGE 3
  43
  44struct hugepage_subpool {
  45	spinlock_t lock;
  46	long count;
  47	long max_hpages;	/* Maximum huge pages or -1 if no maximum. */
  48	long used_hpages;	/* Used count against maximum, includes */
  49				/* both allocated and reserved pages. */
  50	struct hstate *hstate;
  51	long min_hpages;	/* Minimum huge pages or -1 if no minimum. */
  52	long rsv_hpages;	/* Pages reserved against global pool to */
  53				/* satisfy minimum size. */
  54};
  55
  56struct resv_map {
  57	struct kref refs;
  58	spinlock_t lock;
  59	struct list_head regions;
  60	long adds_in_progress;
  61	struct list_head region_cache;
  62	long region_cache_count;
  63	struct rw_semaphore rw_sema;
  64#ifdef CONFIG_CGROUP_HUGETLB
  65	/*
  66	 * On private mappings, the counter to uncharge reservations is stored
  67	 * here. If these fields are 0, then either the mapping is shared, or
  68	 * cgroup accounting is disabled for this resv_map.
  69	 */
  70	struct page_counter *reservation_counter;
  71	unsigned long pages_per_hpage;
  72	struct cgroup_subsys_state *css;
  73#endif
  74};
  75
  76/*
  77 * Region tracking -- allows tracking of reservations and instantiated pages
  78 *                    across the pages in a mapping.
  79 *
  80 * The region data structures are embedded into a resv_map and protected
  81 * by a resv_map's lock.  The set of regions within the resv_map represent
  82 * reservations for huge pages, or huge pages that have already been
  83 * instantiated within the map.  The from and to elements are huge page
  84 * indices into the associated mapping.  from indicates the starting index
  85 * of the region.  to represents the first index past the end of  the region.
  86 *
  87 * For example, a file region structure with from == 0 and to == 4 represents
  88 * four huge pages in a mapping.  It is important to note that the to element
  89 * represents the first element past the end of the region. This is used in
  90 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
  91 *
  92 * Interval notation of the form [from, to) will be used to indicate that
  93 * the endpoint from is inclusive and to is exclusive.
  94 */
  95struct file_region {
  96	struct list_head link;
  97	long from;
  98	long to;
  99#ifdef CONFIG_CGROUP_HUGETLB
 100	/*
 101	 * On shared mappings, each reserved region appears as a struct
 102	 * file_region in resv_map. These fields hold the info needed to
 103	 * uncharge each reservation.
 104	 */
 105	struct page_counter *reservation_counter;
 106	struct cgroup_subsys_state *css;
 107#endif
 108};
 109
 110struct hugetlb_vma_lock {
 111	struct kref refs;
 112	struct rw_semaphore rw_sema;
 113	struct vm_area_struct *vma;
 114};
 115
 116extern struct resv_map *resv_map_alloc(void);
 117void resv_map_release(struct kref *ref);
 118
 119extern spinlock_t hugetlb_lock;
 120extern int hugetlb_max_hstate __read_mostly;
 121#define for_each_hstate(h) \
 122	for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++)
 123
 124struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
 125						long min_hpages);
 126void hugepage_put_subpool(struct hugepage_subpool *spool);
 127
 128void hugetlb_dup_vma_private(struct vm_area_struct *vma);
 129void clear_vma_resv_huge_pages(struct vm_area_struct *vma);
 130int move_hugetlb_page_tables(struct vm_area_struct *vma,
 131			     struct vm_area_struct *new_vma,
 132			     unsigned long old_addr, unsigned long new_addr,
 133			     unsigned long len);
 134int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
 135			    struct vm_area_struct *, struct vm_area_struct *);
 136struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
 137				      unsigned long address, unsigned int flags,
 138				      unsigned int *page_mask);
 139void unmap_hugepage_range(struct vm_area_struct *,
 140			  unsigned long, unsigned long, struct page *,
 141			  zap_flags_t);
 142void __unmap_hugepage_range(struct mmu_gather *tlb,
 143			  struct vm_area_struct *vma,
 144			  unsigned long start, unsigned long end,
 145			  struct page *ref_page, zap_flags_t zap_flags);
 146void hugetlb_report_meminfo(struct seq_file *);
 147int hugetlb_report_node_meminfo(char *buf, int len, int nid);
 148void hugetlb_show_meminfo_node(int nid);
 149unsigned long hugetlb_total_pages(void);
 150vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 151			unsigned long address, unsigned int flags);
 152#ifdef CONFIG_USERFAULTFD
 153int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 154			     struct vm_area_struct *dst_vma,
 155			     unsigned long dst_addr,
 156			     unsigned long src_addr,
 157			     uffd_flags_t flags,
 158			     struct folio **foliop);
 159#endif /* CONFIG_USERFAULTFD */
 160bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
 161						struct vm_area_struct *vma,
 162						vm_flags_t vm_flags);
 163long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
 164						long freed);
 165bool isolate_hugetlb(struct folio *folio, struct list_head *list);
 166int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison);
 167int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
 168				bool *migratable_cleared);
 169void folio_putback_active_hugetlb(struct folio *folio);
 170void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
 171void hugetlb_fix_reserve_counts(struct inode *inode);
 172extern struct mutex *hugetlb_fault_mutex_table;
 173u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
 174
 175pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
 176		      unsigned long addr, pud_t *pud);
 177
 178struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
 179
 180extern int sysctl_hugetlb_shm_group;
 181extern struct list_head huge_boot_pages[MAX_NUMNODES];
 182
 183/* arch callbacks */
 184
 185#ifndef CONFIG_HIGHPTE
 186/*
 187 * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures
 188 * which may go down to the lowest PTE level in their huge_pte_offset() and
 189 * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap().
 190 */
 191static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address)
 192{
 193	return pte_offset_kernel(pmd, address);
 194}
 195static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd,
 196				    unsigned long address)
 197{
 198	return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address);
 199}
 200#endif
 201
 202pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
 203			unsigned long addr, unsigned long sz);
 204/*
 205 * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE.
 206 * Returns the pte_t* if found, or NULL if the address is not mapped.
 207 *
 208 * IMPORTANT: we should normally not directly call this function, instead
 209 * this is only a common interface to implement arch-specific
 210 * walker. Please use hugetlb_walk() instead, because that will attempt to
 211 * verify the locking for you.
 212 *
 213 * Since this function will walk all the pgtable pages (including not only
 214 * high-level pgtable page, but also PUD entry that can be unshared
 215 * concurrently for VM_SHARED), the caller of this function should be
  216 * responsible for its thread safety.  One can follow this rule:
 217 *
 218 *  (1) For private mappings: pmd unsharing is not possible, so holding the
 219 *      mmap_lock for either read or write is sufficient. Most callers
 220 *      already hold the mmap_lock, so normally, no special action is
 221 *      required.
 222 *
 223 *  (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged
 224 *      pgtable page can go away from under us!  It can be done by a pmd
 225 *      unshare with a follow up munmap() on the other process), then we
 226 *      need either:
 227 *
 228 *     (2.1) hugetlb vma lock read or write held, to make sure pmd unshare
 229 *           won't happen upon the range (it also makes sure the pte_t we
 230 *           read is the right and stable one), or,
 231 *
 232 *     (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make
 233 *           sure even if unshare happened the racy unmap() will wait until
 234 *           i_mmap_rwsem is released.
 235 *
  236 * Option (2.1) is the safest, which guarantees pte stability from the pmd
  237 * sharing point of view, until the vma lock is released.  Option (2.2) doesn't protect
 238 * a concurrent pmd unshare, but it makes sure the pgtable page is safe to
 239 * access.
 240 */
 241pte_t *huge_pte_offset(struct mm_struct *mm,
 242		       unsigned long addr, unsigned long sz);
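Following rule (2.1) above, a hedged sketch of a locked walk (hypothetical helper name, not part of the header; the hugetlb vma lock helpers are declared just below):

static inline bool example_pte_slot_present(struct vm_area_struct *vma,
					    unsigned long addr, unsigned long sz)
{
	pte_t *ptep;
	bool present;

	hugetlb_vma_lock_read(vma);		/* rule (2.1): block pmd unshare */
	ptep = huge_pte_offset(vma->vm_mm, addr, sz);
	present = (ptep != NULL);
	hugetlb_vma_unlock_read(vma);
	return present;
}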
 243unsigned long hugetlb_mask_last_page(struct hstate *h);
 244int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
 245				unsigned long addr, pte_t *ptep);
 246void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
 247				unsigned long *start, unsigned long *end);
 248
 249extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
 250				unsigned long *begin, unsigned long *end);
 251extern void __hugetlb_zap_end(struct vm_area_struct *vma,
 252			      struct zap_details *details);
 253
 254static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
 255				     unsigned long *start, unsigned long *end)
 256{
 257	if (is_vm_hugetlb_page(vma))
 258		__hugetlb_zap_begin(vma, start, end);
 259}
 260
 261static inline void hugetlb_zap_end(struct vm_area_struct *vma,
 262				   struct zap_details *details)
 263{
 264	if (is_vm_hugetlb_page(vma))
 265		__hugetlb_zap_end(vma, details);
 266}
 267
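A hedged sketch of how callers bracket an unmap with the zap helpers above (hypothetical helper name, not part of the header; a real caller would pass its own zap_details):

static inline void example_zap_range(struct vm_area_struct *vma,
				     unsigned long start, unsigned long end)
{
	hugetlb_zap_begin(vma, &start, &end);	/* may widen [start, end) */
	/* ... the actual unmap of [start, end) would go here ... */
	hugetlb_zap_end(vma, NULL);
}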
 268void hugetlb_vma_lock_read(struct vm_area_struct *vma);
 269void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
 270void hugetlb_vma_lock_write(struct vm_area_struct *vma);
 271void hugetlb_vma_unlock_write(struct vm_area_struct *vma);
 272int hugetlb_vma_trylock_write(struct vm_area_struct *vma);
 273void hugetlb_vma_assert_locked(struct vm_area_struct *vma);
 274void hugetlb_vma_lock_release(struct kref *kref);
 275
 276int pmd_huge(pmd_t pmd);
 277int pud_huge(pud_t pud);
 278long hugetlb_change_protection(struct vm_area_struct *vma,
 279		unsigned long address, unsigned long end, pgprot_t newprot,
 280		unsigned long cp_flags);
 281
 282bool is_hugetlb_entry_migration(pte_t pte);
 283bool is_hugetlb_entry_hwpoisoned(pte_t pte);
 284void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
 285
 286#else /* !CONFIG_HUGETLB_PAGE */
 287
 288static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma)
 289{
 290}
 291
 292static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
 293{
 294}
 295
 296static inline unsigned long hugetlb_total_pages(void)
 297{
 298	return 0;
 299}
 300
 301static inline struct address_space *hugetlb_page_mapping_lock_write(
 302							struct page *hpage)
 303{
 304	return NULL;
 305}
 306
 307static inline int huge_pmd_unshare(struct mm_struct *mm,
 308					struct vm_area_struct *vma,
 309					unsigned long addr, pte_t *ptep)
 310{
 311	return 0;
 312}
 313
 314static inline void adjust_range_if_pmd_sharing_possible(
 315				struct vm_area_struct *vma,
 316				unsigned long *start, unsigned long *end)
 317{
 318}
 319
 320static inline void hugetlb_zap_begin(
 321				struct vm_area_struct *vma,
 322				unsigned long *start, unsigned long *end)
 323{
 324}
 325
 326static inline void hugetlb_zap_end(
 327				struct vm_area_struct *vma,
 328				struct zap_details *details)
 329{
 330}
 331
 332static inline struct page *hugetlb_follow_page_mask(
 333    struct vm_area_struct *vma, unsigned long address, unsigned int flags,
 334    unsigned int *page_mask)
 335{
  336	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
 337}
 338
 339static inline int copy_hugetlb_page_range(struct mm_struct *dst,
 340					  struct mm_struct *src,
 341					  struct vm_area_struct *dst_vma,
 342					  struct vm_area_struct *src_vma)
 343{
 344	BUG();
 345	return 0;
 346}
 347
 348static inline int move_hugetlb_page_tables(struct vm_area_struct *vma,
 349					   struct vm_area_struct *new_vma,
 350					   unsigned long old_addr,
 351					   unsigned long new_addr,
 352					   unsigned long len)
 353{
 354	BUG();
 355	return 0;
 356}
 357
 358static inline void hugetlb_report_meminfo(struct seq_file *m)
 359{
 360}
 361
 362static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid)
 363{
 364	return 0;
 365}
 366
 367static inline void hugetlb_show_meminfo_node(int nid)
 368{
 369}
 370
 371static inline int prepare_hugepage_range(struct file *file,
 372				unsigned long addr, unsigned long len)
 373{
 374	return -EINVAL;
 375}
 376
 377static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
 378{
 379}
 380
 381static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
 382{
 383}
 384
 385static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma)
 386{
 387}
 388
 389static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
 390{
 391}
 392
 393static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
 394{
 395	return 1;
 396}
 397
 398static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
 399{
 400}
 401
 402static inline int pmd_huge(pmd_t pmd)
 403{
 404	return 0;
 405}
 406
 407static inline int pud_huge(pud_t pud)
 408{
 409	return 0;
 410}
 411
 412static inline int is_hugepage_only_range(struct mm_struct *mm,
 413					unsigned long addr, unsigned long len)
 414{
 415	return 0;
 416}
 417
 418static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 419				unsigned long addr, unsigned long end,
 420				unsigned long floor, unsigned long ceiling)
 421{
 422	BUG();
 423}
 424
 425#ifdef CONFIG_USERFAULTFD
 426static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
 427					   struct vm_area_struct *dst_vma,
 428					   unsigned long dst_addr,
 429					   unsigned long src_addr,
 430					   uffd_flags_t flags,
 431					   struct folio **foliop)
 432{
 433	BUG();
 434	return 0;
 435}
 436#endif /* CONFIG_USERFAULTFD */
 437
 438static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
 439					unsigned long sz)
 440{
 441	return NULL;
 442}
 443
 444static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list)
 445{
 446	return false;
 447}
 448
 449static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
 450{
 451	return 0;
 452}
 453
 454static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
 455					bool *migratable_cleared)
 456{
 457	return 0;
 458}
 459
 460static inline void folio_putback_active_hugetlb(struct folio *folio)
 461{
 462}
 463
 464static inline void move_hugetlb_state(struct folio *old_folio,
 465					struct folio *new_folio, int reason)
 466{
 467}
 468
 469static inline long hugetlb_change_protection(
 470			struct vm_area_struct *vma, unsigned long address,
 471			unsigned long end, pgprot_t newprot,
 472			unsigned long cp_flags)
 473{
 474	return 0;
 475}
 476
 477static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
 478			struct vm_area_struct *vma, unsigned long start,
 479			unsigned long end, struct page *ref_page,
 480			zap_flags_t zap_flags)
 481{
 482	BUG();
 483}
 484
 485static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 486			struct vm_area_struct *vma, unsigned long address,
 487			unsigned int flags)
 488{
 489	BUG();
 490	return 0;
 491}
 492
 493static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 494
 495#endif /* !CONFIG_HUGETLB_PAGE */
 496/*
  497 * hugepages at the page global directory. If the arch supports
  498 * hugepages at the pgd level, it needs to define this.
 499 */
 500#ifndef pgd_huge
 501#define pgd_huge(x)	0
 502#endif
 503#ifndef p4d_huge
 504#define p4d_huge(x)	0
 505#endif
 506
 507#ifndef pgd_write
 508static inline int pgd_write(pgd_t pgd)
 509{
 510	BUG();
 511	return 0;
 512}
 513#endif
 514
 515#define HUGETLB_ANON_FILE "anon_hugepage"
 516
 517enum {
 518	/*
 519	 * The file will be used as an shm file so shmfs accounting rules
 520	 * apply
 521	 */
 522	HUGETLB_SHMFS_INODE     = 1,
 523	/*
 524	 * The file is being created on the internal vfs mount and shmfs
 525	 * accounting rules do not apply
 526	 */
 527	HUGETLB_ANONHUGE_INODE  = 2,
 528};
 529
 530#ifdef CONFIG_HUGETLBFS
 531struct hugetlbfs_sb_info {
 532	long	max_inodes;   /* inodes allowed */
 533	long	free_inodes;  /* inodes free */
 534	spinlock_t	stat_lock;
 535	struct hstate *hstate;
 536	struct hugepage_subpool *spool;
 537	kuid_t	uid;
 538	kgid_t	gid;
 539	umode_t mode;
 540};
 541
 542static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
 543{
 544	return sb->s_fs_info;
 545}
 546
 547struct hugetlbfs_inode_info {
 548	struct inode vfs_inode;
 549	unsigned int seals;
 550};
 551
 552static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
 553{
 554	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
 555}
 556
 557extern const struct file_operations hugetlbfs_file_operations;
 558extern const struct vm_operations_struct hugetlb_vm_ops;
 559struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct,
 560				int creat_flags, int page_size_log);
 561
 562static inline bool is_file_hugepages(struct file *file)
 563{
 564	if (file->f_op == &hugetlbfs_file_operations)
 565		return true;
 566
 567	return is_file_shm_hugepages(file);
 568}
 569
 570static inline struct hstate *hstate_inode(struct inode *i)
 571{
 572	return HUGETLBFS_SB(i->i_sb)->hstate;
 573}
 574#else /* !CONFIG_HUGETLBFS */
 575
 576#define is_file_hugepages(file)			false
 577static inline struct file *
 578hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag,
 579		int creat_flags, int page_size_log)
 580{
 581	return ERR_PTR(-ENOSYS);
 582}
 583
 584static inline struct hstate *hstate_inode(struct inode *i)
 585{
 586	return NULL;
 587}
 588#endif /* !CONFIG_HUGETLBFS */
 589
 590#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 591unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 592					unsigned long len, unsigned long pgoff,
 593					unsigned long flags);
 594#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
 595
 596unsigned long
 597generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 598				  unsigned long len, unsigned long pgoff,
 599				  unsigned long flags);
 600
 601/*
  602 * hugetlb page specific state flags.  These flags are located in page.private
 603 * of the hugetlb head page.  Functions created via the below macros should be
 604 * used to manipulate these flags.
 605 *
 606 * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at
 607 *	allocation time.  Cleared when page is fully instantiated.  Free
 608 *	routine checks flag to restore a reservation on error paths.
 609 *	Synchronization:  Examined or modified by code that knows it has
 610 *	the only reference to page.  i.e. After allocation but before use
 611 *	or when the page is being freed.
 612 * HPG_migratable  - Set after a newly allocated page is added to the page
 613 *	cache and/or page tables.  Indicates the page is a candidate for
 614 *	migration.
 615 *	Synchronization:  Initially set after new page allocation with no
 616 *	locking.  When examined and modified during migration processing
 617 *	(isolate, migrate, putback) the hugetlb_lock is held.
 618 * HPG_temporary - Set on a page that is temporarily allocated from the buddy
 619 *	allocator.  Typically used for migration target pages when no pages
 620 *	are available in the pool.  The hugetlb free page path will
 621 *	immediately free pages with this flag set to the buddy allocator.
 622 *	Synchronization: Can be set after huge page allocation from buddy when
  623 *	code knows it has the only reference.  All other examinations and
 624 *	modifications require hugetlb_lock.
 625 * HPG_freed - Set when page is on the free lists.
 626 *	Synchronization: hugetlb_lock held for examination and modification.
 627 * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed.
 628 * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page
 629 *     that is not tracked by raw_hwp_page list.
 630 */
 631enum hugetlb_page_flags {
 632	HPG_restore_reserve = 0,
 633	HPG_migratable,
 634	HPG_temporary,
 635	HPG_freed,
 636	HPG_vmemmap_optimized,
 637	HPG_raw_hwp_unreliable,
 638	__NR_HPAGEFLAGS,
 639};
 640
 641/*
 642 * Macros to create test, set and clear function definitions for
 643 * hugetlb specific page flags.
 644 */
 645#ifdef CONFIG_HUGETLB_PAGE
 646#define TESTHPAGEFLAG(uname, flname)				\
 647static __always_inline						\
 648bool folio_test_hugetlb_##flname(struct folio *folio)		\
 649	{	void *private = &folio->private;		\
 650		return test_bit(HPG_##flname, private);		\
 651	}							\
 652static inline int HPage##uname(struct page *page)		\
 653	{ return test_bit(HPG_##flname, &(page->private)); }
 654
 655#define SETHPAGEFLAG(uname, flname)				\
 656static __always_inline						\
 657void folio_set_hugetlb_##flname(struct folio *folio)		\
 658	{	void *private = &folio->private;		\
 659		set_bit(HPG_##flname, private);			\
 660	}							\
 661static inline void SetHPage##uname(struct page *page)		\
 662	{ set_bit(HPG_##flname, &(page->private)); }
 663
 664#define CLEARHPAGEFLAG(uname, flname)				\
 665static __always_inline						\
 666void folio_clear_hugetlb_##flname(struct folio *folio)		\
 667	{	void *private = &folio->private;		\
 668		clear_bit(HPG_##flname, private);		\
 669	}							\
 670static inline void ClearHPage##uname(struct page *page)		\
 671	{ clear_bit(HPG_##flname, &(page->private)); }
 672#else
 673#define TESTHPAGEFLAG(uname, flname)				\
 674static inline bool						\
 675folio_test_hugetlb_##flname(struct folio *folio)		\
 676	{ return 0; }						\
 677static inline int HPage##uname(struct page *page)		\
 678	{ return 0; }
 679
 680#define SETHPAGEFLAG(uname, flname)				\
 681static inline void						\
 682folio_set_hugetlb_##flname(struct folio *folio) 		\
 683	{ }							\
 684static inline void SetHPage##uname(struct page *page)		\
 685	{ }
 686
 687#define CLEARHPAGEFLAG(uname, flname)				\
 688static inline void						\
 689folio_clear_hugetlb_##flname(struct folio *folio)		\
 690	{ }							\
 691static inline void ClearHPage##uname(struct page *page)		\
 692	{ }
 693#endif
 694
 695#define HPAGEFLAG(uname, flname)				\
 696	TESTHPAGEFLAG(uname, flname)				\
 697	SETHPAGEFLAG(uname, flname)				\
 698	CLEARHPAGEFLAG(uname, flname)				\
 699
 700/*
 701 * Create functions associated with hugetlb page flags
 702 */
 703HPAGEFLAG(RestoreReserve, restore_reserve)
 704HPAGEFLAG(Migratable, migratable)
 705HPAGEFLAG(Temporary, temporary)
 706HPAGEFLAG(Freed, freed)
 707HPAGEFLAG(VmemmapOptimized, vmemmap_optimized)
 708HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable)
 709
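/*
 * Illustrative sketch (not taken from the kernel sources): each HPAGEFLAG()
 * invocation above expands into a test/set/clear triple for both folios and
 * pages.  For instance, HPAGEFLAG(Temporary, temporary) generates
 * folio_test_hugetlb_temporary(), folio_set_hugetlb_temporary() and
 * folio_clear_hugetlb_temporary() (plus the HPageTemporary() page variants),
 * which a caller that satisfies the flag's synchronization rules could use
 * roughly as:
 *
 *	if (!folio_test_hugetlb_temporary(folio))
 *		folio_set_hugetlb_temporary(folio);
 *	...
 *	folio_clear_hugetlb_temporary(folio);
 */
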
 710#ifdef CONFIG_HUGETLB_PAGE
 711
 712#define HSTATE_NAME_LEN 32
 713/* Defines one hugetlb page size */
 714struct hstate {
 715	struct mutex resize_lock;
 716	int next_nid_to_alloc;
 717	int next_nid_to_free;
 718	unsigned int order;
 719	unsigned int demote_order;
 720	unsigned long mask;
 721	unsigned long max_huge_pages;
 722	unsigned long nr_huge_pages;
 723	unsigned long free_huge_pages;
 724	unsigned long resv_huge_pages;
 725	unsigned long surplus_huge_pages;
 726	unsigned long nr_overcommit_huge_pages;
 727	struct list_head hugepage_activelist;
 728	struct list_head hugepage_freelists[MAX_NUMNODES];
 729	unsigned int max_huge_pages_node[MAX_NUMNODES];
 730	unsigned int nr_huge_pages_node[MAX_NUMNODES];
 731	unsigned int free_huge_pages_node[MAX_NUMNODES];
 732	unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 733#ifdef CONFIG_CGROUP_HUGETLB
 734	/* cgroup control files */
 735	struct cftype cgroup_files_dfl[8];
 736	struct cftype cgroup_files_legacy[10];
 737#endif
 738	char name[HSTATE_NAME_LEN];
 739};
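
/*
 * Illustrative accounting note (a sketch only; mm/hugetlb.c is
 * authoritative): the nr/free/surplus counters above are maintained under
 * hugetlb_lock, and each of these global counts is the sum of its per-node
 * array, e.g.
 *
 *	nr_huge_pages == sum over all nodes of nr_huge_pages_node[],
 *
 * while pages currently handed out can be derived as
 * nr_huge_pages - free_huge_pages.
 */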
 740
 741struct huge_bootmem_page {
 742	struct list_head list;
 743	struct hstate *hstate;
 744};
 745
 746int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list);
 747struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
 748				unsigned long addr, int avoid_reserve);
 749struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
 750				nodemask_t *nmask, gfp_t gfp_mask);
 751int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
 752			pgoff_t idx);
 753void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
 754				unsigned long address, struct folio *folio);
 755
 756/* arch callback */
 757int __init __alloc_bootmem_huge_page(struct hstate *h, int nid);
 758int __init alloc_bootmem_huge_page(struct hstate *h, int nid);
 759bool __init hugetlb_node_alloc_supported(void);
 760
 761void __init hugetlb_add_hstate(unsigned order);
 762bool __init arch_hugetlb_valid_size(unsigned long size);
 763struct hstate *size_to_hstate(unsigned long size);
 764
 765#ifndef HUGE_MAX_HSTATE
 766#define HUGE_MAX_HSTATE 1
 767#endif
 768
 769extern struct hstate hstates[HUGE_MAX_HSTATE];
 770extern unsigned int default_hstate_idx;
 771
 772#define default_hstate (hstates[default_hstate_idx])
 773
 774static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
 775{
 776	return folio->_hugetlb_subpool;
 777}
 778
 779static inline void hugetlb_set_folio_subpool(struct folio *folio,
 780					struct hugepage_subpool *subpool)
 781{
 782	folio->_hugetlb_subpool = subpool;
 783}
 784
 785static inline struct hstate *hstate_file(struct file *f)
 786{
 787	return hstate_inode(file_inode(f));
 788}
 789
 790static inline struct hstate *hstate_sizelog(int page_size_log)
 791{
 792	if (!page_size_log)
 793		return &default_hstate;
 794
 795	if (page_size_log < BITS_PER_LONG)
 796		return size_to_hstate(1UL << page_size_log);
 797
 798	return NULL;
 799}
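
/*
 * Illustrative sketch (not taken from the kernel sources): page_size_log is
 * log2 of the requested huge page size, typically extracted from an encoding
 * such as mmap()'s MAP_HUGE_* bits, with 0 meaning "use the default hstate".
 * Assuming a 2 MB hstate is configured:
 *
 *	struct hstate *h = hstate_sizelog(21);	/* 1UL << 21 == 2 MB */
 *
 * returns that hstate, while an unconfigured size makes size_to_hstate()
 * return NULL.
 */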
 800
 801static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
 802{
 803	return hstate_file(vma->vm_file);
 804}
 805
 806static inline unsigned long huge_page_size(const struct hstate *h)
 807{
 808	return (unsigned long)PAGE_SIZE << h->order;
 809}
 810
 811extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma);
 812
 813extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma);
 814
 815static inline unsigned long huge_page_mask(struct hstate *h)
 816{
 817	return h->mask;
 818}
 819
 820static inline unsigned int huge_page_order(struct hstate *h)
 821{
 822	return h->order;
 823}
 824
 825static inline unsigned huge_page_shift(struct hstate *h)
 826{
 827	return h->order + PAGE_SHIFT;
 828}
 829
 830static inline bool hstate_is_gigantic(struct hstate *h)
 831{
 832	return huge_page_order(h) > MAX_PAGE_ORDER;
 833}
 834
 835static inline unsigned int pages_per_huge_page(const struct hstate *h)
 836{
 837	return 1 << h->order;
 838}
 839
 840static inline unsigned int blocks_per_huge_page(struct hstate *h)
 841{
 842	return huge_page_size(h) / 512;
 843}
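
/*
 * Worked example (illustrative only), assuming 4 KB base pages and a 2 MB
 * hstate:
 *
 *	huge_page_order(h)	 == 9
 *	huge_page_shift(h)	 == 9 + PAGE_SHIFT == 21
 *	huge_page_size(h)	 == PAGE_SIZE << 9 == 2 MB
 *	huge_page_mask(h)	 == ~(2 MB - 1)
 *	pages_per_huge_page(h)	 == 1 << 9 == 512
 *	blocks_per_huge_page(h)	 == 2 MB / 512 == 4096 (512-byte blocks)
 */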
 844
 845static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
 846				struct address_space *mapping, pgoff_t idx)
 847{
 848	return filemap_lock_folio(mapping, idx << huge_page_order(h));
 849}
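
/*
 * Illustrative note: @idx above is in huge page units, while the page cache
 * is indexed in base page units, hence the shift by huge_page_order().  For
 * an assumed 2 MB hstate (order 9), hugetlb index 3 is looked up at page
 * cache index 3 << 9 == 1536.
 */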
 850
 851#include <asm/hugetlb.h>
 852
 853#ifndef is_hugepage_only_range
 854static inline int is_hugepage_only_range(struct mm_struct *mm,
 855					unsigned long addr, unsigned long len)
 856{
 857	return 0;
 858}
 859#define is_hugepage_only_range is_hugepage_only_range
 860#endif
 861
 862#ifndef arch_clear_hugepage_flags
 863static inline void arch_clear_hugepage_flags(struct page *page) { }
 864#define arch_clear_hugepage_flags arch_clear_hugepage_flags
 865#endif
 866
 867#ifndef arch_make_huge_pte
 868static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift,
 869				       vm_flags_t flags)
 870{
 871	return pte_mkhuge(entry);
 872}
 873#endif
 874
 875static inline struct hstate *folio_hstate(struct folio *folio)
 876{
 877	VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
 878	return size_to_hstate(folio_size(folio));
 879}
 880
 881static inline unsigned hstate_index_to_shift(unsigned index)
 882{
 883	return hstates[index].order + PAGE_SHIFT;
 884}
 885
 886static inline int hstate_index(struct hstate *h)
 887{
 888	return h - hstates;
 889}
 890
 891extern int dissolve_free_huge_page(struct page *page);
 892extern int dissolve_free_huge_pages(unsigned long start_pfn,
 893				    unsigned long end_pfn);
 894
 895#ifdef CONFIG_MEMORY_FAILURE
 896extern void folio_clear_hugetlb_hwpoison(struct folio *folio);
 897#else
 898static inline void folio_clear_hugetlb_hwpoison(struct folio *folio)
 899{
 900}
 901#endif
 902
 903#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
 904#ifndef arch_hugetlb_migration_supported
 905static inline bool arch_hugetlb_migration_supported(struct hstate *h)
 906{
 907	if ((huge_page_shift(h) == PMD_SHIFT) ||
 908		(huge_page_shift(h) == PUD_SHIFT) ||
 909			(huge_page_shift(h) == PGDIR_SHIFT))
 910		return true;
 911	else
 912		return false;
 913}
 914#endif
 915#else
 916static inline bool arch_hugetlb_migration_supported(struct hstate *h)
 917{
 918	return false;
 919}
 920#endif
 921
 922static inline bool hugepage_migration_supported(struct hstate *h)
 923{
 924	return arch_hugetlb_migration_supported(h);
 925}
 926
 927/*
 928 * The movability check is distinct from the migration check: it
 929 * determines whether a huge page may be placed in a movable zone.
 930 * Movability is only relevant if the huge page size is supported
 931 * for migration in the first place; there is no reason for a huge
 932 * page to be movable if it is not migratable to start with. The
 933 * huge page must also be small enough that it can be placed in a
 934 * movable zone and still be migrated out of it in practice, since
 935 * mere presence in a movable zone does not by itself make
 936 * migration feasible.
 937 *
 938 * So even though large huge page sizes such as the gigantic ones
 939 * are migratable, they should not be movable because it is not
 940 * feasible to migrate them out of a movable zone.
 941 */
 942static inline bool hugepage_movable_supported(struct hstate *h)
 943{
 944	if (!hugepage_migration_supported(h))
 945		return false;
 946
 947	if (hstate_is_gigantic(h))
 948		return false;
 949	return true;
 950}
 951
 952/* Movability of hugepages depends on migration support. */
 953static inline gfp_t htlb_alloc_mask(struct hstate *h)
 954{
 955	if (hugepage_movable_supported(h))
 956		return GFP_HIGHUSER_MOVABLE;
 957	else
 958		return GFP_HIGHUSER;
 959}
 960
 961static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
 962{
 963	gfp_t modified_mask = htlb_alloc_mask(h);
 964
 965	/* Some callers might want to enforce node */
 966	modified_mask |= (gfp_mask & __GFP_THISNODE);
 967
 968	modified_mask |= (gfp_mask & __GFP_NOWARN);
 969
 970	return modified_mask;
 971}
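
/*
 * Illustrative sketch (not taken from the kernel sources): a caller that
 * wants a huge page on a specific node, without an allocation-failure
 * warning, might build its gfp mask roughly as:
 *
 *	gfp_t gfp = htlb_modify_alloc_mask(h, __GFP_THISNODE | __GFP_NOWARN);
 *	struct folio *folio = alloc_hugetlb_folio_nodemask(h, nid, NULL, gfp);
 *
 * Only __GFP_THISNODE and __GFP_NOWARN are taken from the passed-in mask;
 * everything else comes from htlb_alloc_mask().
 */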
 972
 973static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
 974					   struct mm_struct *mm, pte_t *pte)
 975{
 976	if (huge_page_size(h) == PMD_SIZE)
 977		return pmd_lockptr(mm, (pmd_t *) pte);
 978	VM_BUG_ON(huge_page_size(h) == PAGE_SIZE);
 979	return &mm->page_table_lock;
 980}
 981
 982#ifndef hugepages_supported
 983/*
 984 * Some platforms decide whether they support huge pages at boot
 985 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 986 * when there is no such support.
 987 */
 988#define hugepages_supported() (HPAGE_SHIFT != 0)
 989#endif
 990
 991void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm);
 992
 993static inline void hugetlb_count_init(struct mm_struct *mm)
 994{
 995	atomic_long_set(&mm->hugetlb_usage, 0);
 996}
 997
 998static inline void hugetlb_count_add(long l, struct mm_struct *mm)
 999{
1000	atomic_long_add(l, &mm->hugetlb_usage);
1001}
1002
1003static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
1004{
1005	atomic_long_sub(l, &mm->hugetlb_usage);
1006}
1007
1008#ifndef huge_ptep_modify_prot_start
1009#define huge_ptep_modify_prot_start huge_ptep_modify_prot_start
1010static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma,
1011						unsigned long addr, pte_t *ptep)
1012{
1013	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
1014}
1015#endif
1016
1017#ifndef huge_ptep_modify_prot_commit
1018#define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit
1019static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
1020						unsigned long addr, pte_t *ptep,
1021						pte_t old_pte, pte_t pte)
1022{
1023	unsigned long psize = huge_page_size(hstate_vma(vma));
1024
1025	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
1026}
1027#endif
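
/*
 * Illustrative sketch (not taken from the kernel sources): the start/commit
 * pair above brackets a protection change on a huge PTE, roughly:
 *
 *	pte_t old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte_t new_pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, new_pte);
 *
 * Architectures can override the pair with atomic or batched variants; the
 * generic fallbacks above use huge_ptep_get_and_clear() followed by
 * set_huge_pte_at().
 */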
1028
1029#ifdef CONFIG_NUMA
1030void hugetlb_register_node(struct node *node);
1031void hugetlb_unregister_node(struct node *node);
1032#endif
1033
1034/*
1035 * Check if a given raw @page in a hugepage is HWPOISON.
1036 */
1037bool is_raw_hwpoison_page_in_hugepage(struct page *page);
1038
1039#else	/* CONFIG_HUGETLB_PAGE */
1040struct hstate {};
1041
1042static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
1043{
1044	return NULL;
1045}
1046
1047static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
1048				struct address_space *mapping, pgoff_t idx)
1049{
1050	return NULL;
1051}
1052
1053static inline int isolate_or_dissolve_huge_page(struct page *page,
1054						struct list_head *list)
1055{
1056	return -ENOMEM;
1057}
1058
1059static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
1060					   unsigned long addr,
1061					   int avoid_reserve)
1062{
1063	return NULL;
1064}
1065
1066static inline struct folio *
1067alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
1068			nodemask_t *nmask, gfp_t gfp_mask)
1069{
1070	return NULL;
1071}
1072
1073static inline int __alloc_bootmem_huge_page(struct hstate *h)
1074{
1075	return 0;
1076}
1077
1078static inline struct hstate *hstate_file(struct file *f)
1079{
1080	return NULL;
1081}
1082
1083static inline struct hstate *hstate_sizelog(int page_size_log)
1084{
1085	return NULL;
1086}
1087
1088static inline struct hstate *hstate_vma(struct vm_area_struct *vma)
1089{
1090	return NULL;
1091}
1092
1093static inline struct hstate *folio_hstate(struct folio *folio)
1094{
1095	return NULL;
1096}
1097
1098static inline struct hstate *size_to_hstate(unsigned long size)
1099{
1100	return NULL;
1101}
1102
1103static inline unsigned long huge_page_size(struct hstate *h)
1104{
1105	return PAGE_SIZE;
1106}
1107
1108static inline unsigned long huge_page_mask(struct hstate *h)
1109{
1110	return PAGE_MASK;
1111}
1112
1113static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1114{
1115	return PAGE_SIZE;
1116}
1117
1118static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1119{
1120	return PAGE_SIZE;
1121}
1122
1123static inline unsigned int huge_page_order(struct hstate *h)
1124{
1125	return 0;
1126}
1127
1128static inline unsigned int huge_page_shift(struct hstate *h)
1129{
1130	return PAGE_SHIFT;
1131}
1132
1133static inline bool hstate_is_gigantic(struct hstate *h)
1134{
1135	return false;
1136}
1137
1138static inline unsigned int pages_per_huge_page(struct hstate *h)
1139{
1140	return 1;
1141}
1142
1143static inline unsigned hstate_index_to_shift(unsigned index)
1144{
1145	return 0;
1146}
1147
1148static inline int hstate_index(struct hstate *h)
1149{
1150	return 0;
1151}
1152
1153static inline int dissolve_free_huge_page(struct page *page)
1154{
1155	return 0;
1156}
1157
1158static inline int dissolve_free_huge_pages(unsigned long start_pfn,
1159					   unsigned long end_pfn)
1160{
1161	return 0;
1162}
1163
1164static inline bool hugepage_migration_supported(struct hstate *h)
1165{
1166	return false;
1167}
1168
1169static inline bool hugepage_movable_supported(struct hstate *h)
1170{
1171	return false;
1172}
1173
1174static inline gfp_t htlb_alloc_mask(struct hstate *h)
1175{
1176	return 0;
1177}
1178
1179static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask)
1180{
1181	return 0;
1182}
1183
1184static inline spinlock_t *huge_pte_lockptr(struct hstate *h,
1185					   struct mm_struct *mm, pte_t *pte)
1186{
1187	return &mm->page_table_lock;
1188}
1189
1190static inline void hugetlb_count_init(struct mm_struct *mm)
1191{
1192}
1193
1194static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m)
1195{
1196}
1197
1198static inline void hugetlb_count_sub(long l, struct mm_struct *mm)
1199{
1200}
1201
1202static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
1203					  unsigned long addr, pte_t *ptep)
1204{
1205#ifdef CONFIG_MMU
1206	return ptep_get(ptep);
1207#else
1208	return *ptep;
1209#endif
1210}
1211
1212static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
1213				   pte_t *ptep, pte_t pte, unsigned long sz)
1214{
1215}
1216
1217static inline void hugetlb_register_node(struct node *node)
1218{
1219}
1220
1221static inline void hugetlb_unregister_node(struct node *node)
1222{
1223}
1224#endif	/* CONFIG_HUGETLB_PAGE */
1225
1226static inline spinlock_t *huge_pte_lock(struct hstate *h,
1227					struct mm_struct *mm, pte_t *pte)
1228{
1229	spinlock_t *ptl;
1230
1231	ptl = huge_pte_lockptr(h, mm, pte);
1232	spin_lock(ptl);
1233	return ptl;
1234}
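
/*
 * Illustrative sketch (not taken from the kernel sources): the usual pattern
 * is to examine or modify a huge PTE only while holding its lock, e.g.:
 *
 *	spinlock_t *ptl = huge_pte_lock(h, mm, ptep);
 *	... read or update the huge PTE at ptep ...
 *	spin_unlock(ptl);
 */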
1235
1236#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
1237extern void __init hugetlb_cma_reserve(int order);
1238#else
1239static inline __init void hugetlb_cma_reserve(int order)
1240{
1241}
1242#endif
1243
1244#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
1245static inline bool hugetlb_pmd_shared(pte_t *pte)
1246{
1247	return page_count(virt_to_page(pte)) > 1;
1248}
1249#else
1250static inline bool hugetlb_pmd_shared(pte_t *pte)
1251{
1252	return false;
1253}
1254#endif
1255
1256bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr);
1257
1258#ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
1259/*
1260 * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
1261 * implement this.
1262 */
1263#define flush_hugetlb_tlb_range(vma, addr, end)	flush_tlb_range(vma, addr, end)
1264#endif
1265
1266static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
1267{
1268	return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
1269}
1270
1271bool __vma_private_lock(struct vm_area_struct *vma);
1272
1273/*
1274 * Safe version of huge_pte_offset() to check the locks.  See comments
1275 * above huge_pte_offset().
1276 */
1277static inline pte_t *
1278hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz)
1279{
1280#if defined(CONFIG_HUGETLB_PAGE) && \
1281	defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP)
1282	struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1283
1284	/*
1285	 * If pmd sharing is possible, locking is needed to safely walk the
1286	 * hugetlb pgtables.  More information can be found in the comment
1287	 * above huge_pte_offset() in the same file.
1288	 *
1289	 * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP.
1290	 */
1291	if (__vma_shareable_lock(vma))
1292		WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) &&
1293			     !lockdep_is_held(
1294				 &vma->vm_file->f_mapping->i_mmap_rwsem));
1295#endif
1296	return huge_pte_offset(vma->vm_mm, addr, sz);
1297}
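
/*
 * Illustrative sketch (not taken from the kernel sources): callers are
 * expected to hold one of the locks checked above for the duration of the
 * walk, roughly:
 *
 *	hugetlb_vma_lock_read(vma);
 *	pte_t *ptep = hugetlb_walk(vma, addr, huge_page_size(h));
 *	if (ptep)
 *		... inspect the entry, taking huge_pte_lock() as needed ...
 *	hugetlb_vma_unlock_read(vma);
 */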
1298
1299#endif /* _LINUX_HUGETLB_H */