v4.17 (arch/x86/include/asm/pgtable.h)
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_X86_PGTABLE_H
   3#define _ASM_X86_PGTABLE_H
   4
   5#include <linux/mem_encrypt.h>
   6#include <asm/page.h>
   7#include <asm/pgtable_types.h>
   8
   9/*
  10 * Macro to mark a page protection value as UC-
  11 */
  12#define pgprot_noncached(prot)						\
  13	((boot_cpu_data.x86 > 3)					\
  14	 ? (__pgprot(pgprot_val(prot) |					\
  15		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
  16	 : (prot))
  17
  18/*
  19 * Macros to add or remove encryption attribute
  20 */
  21#define pgprot_encrypted(prot)	__pgprot(__sme_set(pgprot_val(prot)))
  22#define pgprot_decrypted(prot)	__pgprot(__sme_clr(pgprot_val(prot)))
  23
  24#ifndef __ASSEMBLY__
  25#include <asm/x86_init.h>
  26
  27extern pgd_t early_top_pgt[PTRS_PER_PGD];
  28int __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
  29
  30void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
  31void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user);
  32void ptdump_walk_pgd_level_checkwx(void);
  33
  34#ifdef CONFIG_DEBUG_WX
  35#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
  36#else
  37#define debug_checkwx() do { } while (0)
  38#endif
  39
  40/*
  41 * ZERO_PAGE is a global shared page that is always zero: used
  42 * for zero-mapped memory areas etc..
  43 */
  44extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
  45	__visible;
  46#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
  47
  48extern spinlock_t pgd_lock;
  49extern struct list_head pgd_list;
  50
  51extern struct mm_struct *pgd_page_get_mm(struct page *page);
  52
  53extern pmdval_t early_pmd_flags;
  54
  55#ifdef CONFIG_PARAVIRT
  56#include <asm/paravirt.h>
  57#else  /* !CONFIG_PARAVIRT */
  58#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
  59#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
  60
  61#define set_pte_atomic(ptep, pte)					\
  62	native_set_pte_atomic(ptep, pte)
  63
  64#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
  65
  66#ifndef __PAGETABLE_P4D_FOLDED
  67#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
  68#define pgd_clear(pgd)			(pgtable_l5_enabled ? native_pgd_clear(pgd) : 0)
  69#endif
  70
  71#ifndef set_p4d
  72# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
  73#endif
  74
  75#ifndef __PAGETABLE_PUD_FOLDED
  76#define p4d_clear(p4d)			native_p4d_clear(p4d)
  77#endif
  78
  79#ifndef set_pud
  80# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
  81#endif
  82
  83#ifndef __PAGETABLE_PUD_FOLDED
  84#define pud_clear(pud)			native_pud_clear(pud)
  85#endif
  86
  87#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
  88#define pmd_clear(pmd)			native_pmd_clear(pmd)
  89
  90#define pgd_val(x)	native_pgd_val(x)
  91#define __pgd(x)	native_make_pgd(x)
  92
  93#ifndef __PAGETABLE_P4D_FOLDED
  94#define p4d_val(x)	native_p4d_val(x)
  95#define __p4d(x)	native_make_p4d(x)
  96#endif
  97
  98#ifndef __PAGETABLE_PUD_FOLDED
  99#define pud_val(x)	native_pud_val(x)
 100#define __pud(x)	native_make_pud(x)
 101#endif
 102
 103#ifndef __PAGETABLE_PMD_FOLDED
 104#define pmd_val(x)	native_pmd_val(x)
 105#define __pmd(x)	native_make_pmd(x)
 106#endif
 107
 108#define pte_val(x)	native_pte_val(x)
 109#define __pte(x)	native_make_pte(x)
 110
 111#define arch_end_context_switch(prev)	do {} while(0)
 112
 113#endif	/* CONFIG_PARAVIRT */
 114
 115/*
 116 * The following only work if pte_present() is true.
 117 * Undefined behaviour if not..
 118 */
 119static inline int pte_dirty(pte_t pte)
 120{
 121	return pte_flags(pte) & _PAGE_DIRTY;
 122}
 123
 124
 125static inline u32 read_pkru(void)
 126{
 127	if (boot_cpu_has(X86_FEATURE_OSPKE))
 128		return __read_pkru();
 129	return 0;
 130}
 131
 132static inline void write_pkru(u32 pkru)
 133{
 134	if (boot_cpu_has(X86_FEATURE_OSPKE))
 135		__write_pkru(pkru);
 136}
 137
 138static inline int pte_young(pte_t pte)
 139{
 140	return pte_flags(pte) & _PAGE_ACCESSED;
 141}
 142
 143static inline int pmd_dirty(pmd_t pmd)
 144{
 145	return pmd_flags(pmd) & _PAGE_DIRTY;
 146}
 147
 148static inline int pmd_young(pmd_t pmd)
 149{
 150	return pmd_flags(pmd) & _PAGE_ACCESSED;
 151}
 152
 153static inline int pud_dirty(pud_t pud)
 154{
 155	return pud_flags(pud) & _PAGE_DIRTY;
 156}
 157
 158static inline int pud_young(pud_t pud)
 159{
 160	return pud_flags(pud) & _PAGE_ACCESSED;
 161}
 162
 163static inline int pte_write(pte_t pte)
 164{
 165	return pte_flags(pte) & _PAGE_RW;
 166}
 167
 168static inline int pte_huge(pte_t pte)
 169{
 170	return pte_flags(pte) & _PAGE_PSE;
 171}
 172
 173static inline int pte_global(pte_t pte)
 174{
 175	return pte_flags(pte) & _PAGE_GLOBAL;
 176}
 177
 178static inline int pte_exec(pte_t pte)
 179{
 180	return !(pte_flags(pte) & _PAGE_NX);
 181}
 182
 183static inline int pte_special(pte_t pte)
 184{
 185	return pte_flags(pte) & _PAGE_SPECIAL;
 186}
 187
 188static inline unsigned long pte_pfn(pte_t pte)
 189{
 190	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
 191}
 192
 193static inline unsigned long pmd_pfn(pmd_t pmd)
 194{
 195	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
 196}
 197
 198static inline unsigned long pud_pfn(pud_t pud)
 199{
 200	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
 201}
 202
 203static inline unsigned long p4d_pfn(p4d_t p4d)
 204{
 205	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
 206}
 207
 208static inline unsigned long pgd_pfn(pgd_t pgd)
 209{
 210	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 211}
 212
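The *_pfn() helpers above all follow the same recipe: mask off the flag bits, then shift right by PAGE_SHIFT. A minimal user-space sketch of that recipe, using the x86-64 layout of PTE_PFN_MASK (bits 12..51) as a stand-in constant:

#include <assert.h>
#include <stdint.h>

#define TOY_PFN_MASK	0x000ffffffffff000ULL	/* bits 12..51, as on x86-64 */
#define TOY_PAGE_SHIFT	12

int main(void)
{
	/* PFN 0x1234a5, plus some low flag bits set */
	uint64_t pte = 0x00000001234a5067ULL;

	assert(((pte & TOY_PFN_MASK) >> TOY_PAGE_SHIFT) == 0x1234a5);
	return 0;
}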
 213static inline int p4d_large(p4d_t p4d)
 214{
 215	/* No 512 GiB pages yet */
 216	return 0;
 217}
 218
 219#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 220
 221static inline int pmd_large(pmd_t pte)
 222{
 223	return pmd_flags(pte) & _PAGE_PSE;
 224}
 225
 226#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 227static inline int pmd_trans_huge(pmd_t pmd)
 228{
 229	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 230}
 231
 232#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 233static inline int pud_trans_huge(pud_t pud)
 234{
 235	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 236}
 237#endif
 238
 239#define has_transparent_hugepage has_transparent_hugepage
 240static inline int has_transparent_hugepage(void)
 241{
 242	return boot_cpu_has(X86_FEATURE_PSE);
 243}
 244
 245#ifdef __HAVE_ARCH_PTE_DEVMAP
 246static inline int pmd_devmap(pmd_t pmd)
 247{
 248	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
 249}
 250
 251#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 252static inline int pud_devmap(pud_t pud)
 253{
 254	return !!(pud_val(pud) & _PAGE_DEVMAP);
 255}
 256#else
 257static inline int pud_devmap(pud_t pud)
 258{
 259	return 0;
 260}
 261#endif
 262
 263static inline int pgd_devmap(pgd_t pgd)
 264{
 265	return 0;
 266}
 267#endif
 268#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 269
 270static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 271{
 272	pteval_t v = native_pte_val(pte);
 273
 274	return native_make_pte(v | set);
 275}
 276
 277static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
 278{
 279	pteval_t v = native_pte_val(pte);
 280
 281	return native_make_pte(v & ~clear);
 282}
 283
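pte_set_flags() and pte_clear_flags() are the two primitives that every pte_mk*()/pte_clr*() helper below reduces to: OR a mask in, or AND it out. The same pattern as a standalone user-space sketch, with stand-in flag values rather than the real _PAGE_* constants:

#include <assert.h>
#include <stdint.h>

#define TOY_RW		(1ULL << 1)	/* stand-in for _PAGE_RW */
#define TOY_DIRTY	(1ULL << 6)	/* stand-in for _PAGE_DIRTY */

static uint64_t toy_set_flags(uint64_t v, uint64_t set)     { return v | set; }
static uint64_t toy_clear_flags(uint64_t v, uint64_t clear) { return v & ~clear; }

int main(void)
{
	uint64_t pte = TOY_RW;

	pte = toy_set_flags(pte, TOY_DIRTY);	/* like pte_mkdirty() */
	assert(pte & TOY_DIRTY);

	pte = toy_clear_flags(pte, TOY_RW);	/* like pte_wrprotect() */
	assert(!(pte & TOY_RW));
	return 0;
}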
 284static inline pte_t pte_mkclean(pte_t pte)
 285{
 286	return pte_clear_flags(pte, _PAGE_DIRTY);
 287}
 288
 289static inline pte_t pte_mkold(pte_t pte)
 290{
 291	return pte_clear_flags(pte, _PAGE_ACCESSED);
 292}
 293
 294static inline pte_t pte_wrprotect(pte_t pte)
 295{
 296	return pte_clear_flags(pte, _PAGE_RW);
 297}
 298
 299static inline pte_t pte_mkexec(pte_t pte)
 300{
 301	return pte_clear_flags(pte, _PAGE_NX);
 302}
 303
 304static inline pte_t pte_mkdirty(pte_t pte)
 305{
 306	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 307}
 308
 309static inline pte_t pte_mkyoung(pte_t pte)
 310{
 311	return pte_set_flags(pte, _PAGE_ACCESSED);
 312}
 313
 314static inline pte_t pte_mkwrite(pte_t pte)
 315{
 316	return pte_set_flags(pte, _PAGE_RW);
 317}
 318
 319static inline pte_t pte_mkhuge(pte_t pte)
 320{
 321	return pte_set_flags(pte, _PAGE_PSE);
 322}
 323
 324static inline pte_t pte_clrhuge(pte_t pte)
 325{
 326	return pte_clear_flags(pte, _PAGE_PSE);
 327}
 328
 329static inline pte_t pte_mkglobal(pte_t pte)
 330{
 331	return pte_set_flags(pte, _PAGE_GLOBAL);
 332}
 333
 334static inline pte_t pte_clrglobal(pte_t pte)
 335{
 336	return pte_clear_flags(pte, _PAGE_GLOBAL);
 337}
 338
 339static inline pte_t pte_mkspecial(pte_t pte)
 340{
 341	return pte_set_flags(pte, _PAGE_SPECIAL);
 342}
 343
 344static inline pte_t pte_mkdevmap(pte_t pte)
 345{
 346	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
 347}
 348
 349static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 350{
 351	pmdval_t v = native_pmd_val(pmd);
 352
 353	return native_make_pmd(v | set);
 354}
 355
 356static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
 357{
 358	pmdval_t v = native_pmd_val(pmd);
 359
 360	return native_make_pmd(v & ~clear);
 361}
 362
 363static inline pmd_t pmd_mkold(pmd_t pmd)
 364{
 365	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
 366}
 367
 368static inline pmd_t pmd_mkclean(pmd_t pmd)
 369{
 370	return pmd_clear_flags(pmd, _PAGE_DIRTY);
 371}
 372
 373static inline pmd_t pmd_wrprotect(pmd_t pmd)
 374{
 375	return pmd_clear_flags(pmd, _PAGE_RW);
 376}
 377
 378static inline pmd_t pmd_mkdirty(pmd_t pmd)
 379{
 380	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 381}
 382
 383static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 384{
 385	return pmd_set_flags(pmd, _PAGE_DEVMAP);
 386}
 387
 388static inline pmd_t pmd_mkhuge(pmd_t pmd)
 389{
 390	return pmd_set_flags(pmd, _PAGE_PSE);
 391}
 392
 393static inline pmd_t pmd_mkyoung(pmd_t pmd)
 394{
 395	return pmd_set_flags(pmd, _PAGE_ACCESSED);
 396}
 397
 398static inline pmd_t pmd_mkwrite(pmd_t pmd)
 399{
 400	return pmd_set_flags(pmd, _PAGE_RW);
 401}
 402
 403static inline pmd_t pmd_mknotpresent(pmd_t pmd)
 404{
 405	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
 406}
 407
 408static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
 409{
 410	pudval_t v = native_pud_val(pud);
 411
 412	return native_make_pud(v | set);
 413}
 414
 415static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
 416{
 417	pudval_t v = native_pud_val(pud);
 418
 419	return native_make_pud(v & ~clear);
 420}
 421
 422static inline pud_t pud_mkold(pud_t pud)
 423{
 424	return pud_clear_flags(pud, _PAGE_ACCESSED);
 425}
 426
 427static inline pud_t pud_mkclean(pud_t pud)
 428{
 429	return pud_clear_flags(pud, _PAGE_DIRTY);
 430}
 431
 432static inline pud_t pud_wrprotect(pud_t pud)
 433{
 434	return pud_clear_flags(pud, _PAGE_RW);
 435}
 436
 437static inline pud_t pud_mkdirty(pud_t pud)
 438{
 439	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 440}
 441
 442static inline pud_t pud_mkdevmap(pud_t pud)
 443{
 444	return pud_set_flags(pud, _PAGE_DEVMAP);
 445}
 446
 447static inline pud_t pud_mkhuge(pud_t pud)
 448{
 449	return pud_set_flags(pud, _PAGE_PSE);
 450}
 451
 452static inline pud_t pud_mkyoung(pud_t pud)
 453{
 454	return pud_set_flags(pud, _PAGE_ACCESSED);
 455}
 456
 457static inline pud_t pud_mkwrite(pud_t pud)
 458{
 459	return pud_set_flags(pud, _PAGE_RW);
 460}
 461
 462static inline pud_t pud_mknotpresent(pud_t pud)
 463{
 464	return pud_clear_flags(pud, _PAGE_PRESENT | _PAGE_PROTNONE);
 465}
 466
 467#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 468static inline int pte_soft_dirty(pte_t pte)
 469{
 470	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
 471}
 472
 473static inline int pmd_soft_dirty(pmd_t pmd)
 474{
 475	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
 476}
 477
 478static inline int pud_soft_dirty(pud_t pud)
 479{
 480	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
 481}
 482
 483static inline pte_t pte_mksoft_dirty(pte_t pte)
 484{
 485	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
 486}
 487
 488static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 489{
 490	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 491}
 492
 493static inline pud_t pud_mksoft_dirty(pud_t pud)
 494{
 495	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
 496}
 497
 498static inline pte_t pte_clear_soft_dirty(pte_t pte)
 499{
 500	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
 501}
 502
 503static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 504{
 505	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
 506}
 507
 508static inline pud_t pud_clear_soft_dirty(pud_t pud)
 509{
 510	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
 511}
 512
 513#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 514
 515/*
 516 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 517 * can use those bits for other purposes, so leave them be.
 518 */
 519static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
 520{
 521	pgprotval_t protval = pgprot_val(pgprot);
 522
 523	if (protval & _PAGE_PRESENT)
 524		protval &= __supported_pte_mask;
 525
 526	return protval;
 527}
 528
 529static inline pgprotval_t check_pgprot(pgprot_t pgprot)
 530{
 531	pgprotval_t massaged_val = massage_pgprot(pgprot);
 532
 533	/* mmdebug.h can not be included here because of dependencies */
 534#ifdef CONFIG_DEBUG_VM
 535	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
 536		  "attempted to set unsupported pgprot: %016llx "
 537		  "bits: %016llx supported: %016llx\n",
 538		  (u64)pgprot_val(pgprot),
 539		  (u64)pgprot_val(pgprot) ^ massaged_val,
 540		  (u64)__supported_pte_mask);
 541#endif
 542
 543	return massaged_val;
 544}
 545
 546static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 547{
 548	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
 549		     check_pgprot(pgprot));
 550}
 551
 552static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 553{
 554	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
 555		     check_pgprot(pgprot));
 556}
 557
 558static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
 559{
 560	return __pud(((phys_addr_t)page_nr << PAGE_SHIFT) |
 561		     check_pgprot(pgprot));
 562}
 563
 564static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 565{
 566	pteval_t val = pte_val(pte);
 567
 568	/*
 569	 * Chop off the NX bit (if present), and add the NX portion of
 570	 * the newprot (if present):
 571	 */
 572	val &= _PAGE_CHG_MASK;
 573	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
 574
 575	return __pte(val);
 576}
 577
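The key step in pte_modify() is the pair of masks: bits inside _PAGE_CHG_MASK (the PFN plus a few preserved flags) are kept from the old PTE, and everything else is taken from the new protection. A hedged sketch of that masking, using a simplified stand-in for _PAGE_CHG_MASK:

#include <assert.h>
#include <stdint.h>

#define TOY_PFN_MASK	0x000ffffffffff000ULL
#define TOY_DIRTY	(1ULL << 6)
#define TOY_NX		(1ULL << 63)
#define TOY_CHG_MASK	(TOY_PFN_MASK | TOY_DIRTY)	/* simplified stand-in */

int main(void)
{
	uint64_t pte     = 0x1234a5000ULL | TOY_DIRTY | TOY_NX;
	uint64_t newprot = 0;	/* new protection without NX */
	uint64_t val     = (pte & TOY_CHG_MASK) | (newprot & ~TOY_CHG_MASK);

	assert(val & TOY_DIRTY);			/* preserved from the old PTE */
	assert(!(val & TOY_NX));			/* replaced from newprot */
	assert((val & TOY_PFN_MASK) == 0x1234a5000ULL);	/* PFN untouched */
	return 0;
}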
 578static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 579{
 580	pmdval_t val = pmd_val(pmd);
 581
 582	val &= _HPAGE_CHG_MASK;
 583	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
 584
 585	return __pmd(val);
 586}
 587
 588/* mprotect needs to preserve PAT bits when updating vm_page_prot */
 589#define pgprot_modify pgprot_modify
 590static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 591{
 592	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
 593	pgprotval_t addbits = pgprot_val(newprot);
 594	return __pgprot(preservebits | addbits);
 595}
 596
 597#define pte_pgprot(x) __pgprot(pte_flags(x))
 598#define pmd_pgprot(x) __pgprot(pmd_flags(x))
 599#define pud_pgprot(x) __pgprot(pud_flags(x))
 600#define p4d_pgprot(x) __pgprot(p4d_flags(x))
 601
 602#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 603
 604static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
 605{
 606	return canon_pgprot(prot);
 607}
 608
 609static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 610					 enum page_cache_mode pcm,
 611					 enum page_cache_mode new_pcm)
 612{
 613	/*
 614	 * PAT type is always WB for untracked ranges, so no need to check.
 615	 */
 616	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
 617		return 1;
 618
 619	/*
 620	 * Certain new memtypes are not allowed with certain
 621	 * requested memtype:
 622	 * - request is uncached, return cannot be write-back
 623	 * - request is write-combine, return cannot be write-back
 624	 * - request is write-through, return cannot be write-back
 625	 * - request is write-through, return cannot be write-combine
 626	 */
 627	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
 628	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 629	    (pcm == _PAGE_CACHE_MODE_WC &&
 630	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 631	    (pcm == _PAGE_CACHE_MODE_WT &&
 632	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 633	    (pcm == _PAGE_CACHE_MODE_WT &&
 634	     new_pcm == _PAGE_CACHE_MODE_WC)) {
 635		return 0;
 636	}
 637
 638	return 1;
 639}
 640
 641pmd_t *populate_extra_pmd(unsigned long vaddr);
 642pte_t *populate_extra_pte(unsigned long vaddr);
 643#endif	/* __ASSEMBLY__ */
 644
 645#ifdef CONFIG_X86_32
 646# include <asm/pgtable_32.h>
 647#else
 648# include <asm/pgtable_64.h>
 649#endif
 650
 651#ifndef __ASSEMBLY__
 652#include <linux/mm_types.h>
 653#include <linux/mmdebug.h>
 654#include <linux/log2.h>
 655#include <asm/fixmap.h>
 656
 657static inline int pte_none(pte_t pte)
 658{
 659	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
 660}
 661
 662#define __HAVE_ARCH_PTE_SAME
 663static inline int pte_same(pte_t a, pte_t b)
 664{
 665	return a.pte == b.pte;
 666}
 667
 668static inline int pte_present(pte_t a)
 669{
 670	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 671}
 672
 673#ifdef __HAVE_ARCH_PTE_DEVMAP
 674static inline int pte_devmap(pte_t a)
 675{
 676	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
 677}
 678#endif
 679
 680#define pte_accessible pte_accessible
 681static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 682{
 683	if (pte_flags(a) & _PAGE_PRESENT)
 684		return true;
 685
 686	if ((pte_flags(a) & _PAGE_PROTNONE) &&
 687			mm_tlb_flush_pending(mm))
 688		return true;
 689
 690	return false;
 691}
 692
 693static inline int pmd_present(pmd_t pmd)
 694{
 695	/*
 696	 * Checking for _PAGE_PSE is needed too because
 697	 * split_huge_page will temporarily clear the present bit (but
 698	 * the _PAGE_PSE flag will remain set at all times while the
 699	 * _PAGE_PRESENT bit is clear).
 700	 */
 701	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
 702}
 703
 704#ifdef CONFIG_NUMA_BALANCING
 705/*
 706 * These work without NUMA balancing but the kernel does not care. See the
 707 * comment in include/asm-generic/pgtable.h
 708 */
 709static inline int pte_protnone(pte_t pte)
 710{
 711	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
 712		== _PAGE_PROTNONE;
 713}
 714
 715static inline int pmd_protnone(pmd_t pmd)
 716{
 717	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
 718		== _PAGE_PROTNONE;
 719}
 720#endif /* CONFIG_NUMA_BALANCING */
 721
 722static inline int pmd_none(pmd_t pmd)
 723{
 724	/* Only check low word on 32-bit platforms, since it might be
 725	   out of sync with upper half. */
 726	unsigned long val = native_pmd_val(pmd);
 727	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
 728}
 729
 730static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 731{
 732	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
 733}
 734
 735/*
 736 * Currently stuck as a macro due to indirect forward reference to
 737 * linux/mmzone.h's __section_mem_map_addr() definition:
 738 */
 739#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
 740
 741/*
 742 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 743 *
 744 * this macro returns the index of the entry in the pmd page which would
 745 * control the given virtual address
 746 */
 747static inline unsigned long pmd_index(unsigned long address)
 748{
 749	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
 750}
 751
 752/*
 753 * Conversion functions: convert a page and protection to a page entry,
 754 * and a page entry and page directory to the page they refer to.
 755 *
 756 * (Currently stuck as a macro because of indirect forward reference
 757 * to linux/mm.h:page_to_nid())
 758 */
 759#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
 760
 761/*
 762 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 763 *
 764 * this function returns the index of the entry in the pte page which would
 765 * control the given virtual address
 766 */
 767static inline unsigned long pte_index(unsigned long address)
 768{
 769	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 770}
 771
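To make pmd_index()/pte_index() concrete, here is a user-space sketch with the standard x86-64 4 KiB-page constants (PAGE_SHIFT = 12, PMD_SHIFT = 21, 512 entries per table):

#include <stdio.h>

#define TOY_PAGE_SHIFT	12
#define TOY_PMD_SHIFT	21
#define TOY_PTRS	512

int main(void)
{
	unsigned long addr = 0x7f1234567000UL;

	printf("pte_index = %lu\n", (addr >> TOY_PAGE_SHIFT) & (TOY_PTRS - 1));	/* -> 359 */
	printf("pmd_index = %lu\n", (addr >> TOY_PMD_SHIFT) & (TOY_PTRS - 1));		/* -> 418 */
	return 0;
}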
 772static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
 773{
 774	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
 775}
 776
 777static inline int pmd_bad(pmd_t pmd)
 778{
 779	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
 780}
 781
 782static inline unsigned long pages_to_mb(unsigned long npg)
 783{
 784	return npg >> (20 - PAGE_SHIFT);
 785}
 786
 787#if CONFIG_PGTABLE_LEVELS > 2
 788static inline int pud_none(pud_t pud)
 789{
 790	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
 791}
 792
 793static inline int pud_present(pud_t pud)
 794{
 795	return pud_flags(pud) & _PAGE_PRESENT;
 796}
 797
 798static inline unsigned long pud_page_vaddr(pud_t pud)
 799{
 800	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
 801}
 802
 803/*
 804 * Currently stuck as a macro due to indirect forward reference to
 805 * linux/mmzone.h's __section_mem_map_addr() definition:
 806 */
 807#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
 808
 809/* Find an entry in the second-level page table.. */
 810static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 811{
 812	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
 813}
 814
 815static inline int pud_large(pud_t pud)
 816{
 817	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
 818		(_PAGE_PSE | _PAGE_PRESENT);
 819}
 820
 821static inline int pud_bad(pud_t pud)
 822{
 823	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
 824}
 825#else
 826static inline int pud_large(pud_t pud)
 827{
 828	return 0;
 829}
 830#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 831
 832static inline unsigned long pud_index(unsigned long address)
 833{
 834	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
 835}
 836
 837#if CONFIG_PGTABLE_LEVELS > 3
 838static inline int p4d_none(p4d_t p4d)
 839{
 840	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
 841}
 842
 843static inline int p4d_present(p4d_t p4d)
 844{
 845	return p4d_flags(p4d) & _PAGE_PRESENT;
 846}
 847
 848static inline unsigned long p4d_page_vaddr(p4d_t p4d)
 849{
 850	return (unsigned long)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
 851}
 852
 853/*
 854 * Currently stuck as a macro due to indirect forward reference to
 855 * linux/mmzone.h's __section_mem_map_addr() definition:
 856 */
 857#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
 858
 859/* Find an entry in the third-level page table.. */
 860static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
 861{
 862	return (pud_t *)p4d_page_vaddr(*p4d) + pud_index(address);
 863}
 864
 865static inline int p4d_bad(p4d_t p4d)
 866{
 867	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
 868
 869	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
 870		ignore_flags |= _PAGE_NX;
 871
 872	return (p4d_flags(p4d) & ~ignore_flags) != 0;
 873}
 874#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 875
 876static inline unsigned long p4d_index(unsigned long address)
 877{
 878	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
 879}
 880
 881#if CONFIG_PGTABLE_LEVELS > 4
 882static inline int pgd_present(pgd_t pgd)
 883{
 884	if (!pgtable_l5_enabled)
 885		return 1;
 886	return pgd_flags(pgd) & _PAGE_PRESENT;
 887}
 888
 889static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 890{
 891	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
 892}
 893
 894/*
 895 * Currently stuck as a macro due to indirect forward reference to
 896 * linux/mmzone.h's __section_mem_map_addr() definition:
 897 */
 898#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))
 899
 900/* to find an entry in a page-table-directory. */
 901static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 902{
 903	if (!pgtable_l5_enabled)
 904		return (p4d_t *)pgd;
 905	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
 906}
 907
 908static inline int pgd_bad(pgd_t pgd)
 909{
 910	unsigned long ignore_flags = _PAGE_USER;
 911
 912	if (!pgtable_l5_enabled)
 913		return 0;
 914
 915	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
 916		ignore_flags |= _PAGE_NX;
 917
 918	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
 919}
 920
 921static inline int pgd_none(pgd_t pgd)
 922{
 923	if (!pgtable_l5_enabled)
 924		return 0;
 925	/*
 926	 * There is no need to do a workaround for the KNL stray
 927	 * A/D bit erratum here.  PGDs only point to page tables
 928	 * except on 32-bit non-PAE which is not supported on
 929	 * KNL.
 930	 */
 931	return !native_pgd_val(pgd);
 932}
 933#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
 934
 935#endif	/* __ASSEMBLY__ */
 936
 937/*
 938 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 939 *
 940 * this macro returns the index of the entry in the pgd page which would
 941 * control the given virtual address
 942 */
 943#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
 944
 945/*
 946 * pgd_offset() returns a (pgd_t *)
 947 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 948 */
 949#define pgd_offset_pgd(pgd, address) (pgd + pgd_index((address)))
 950/*
 951 * a shortcut to get a pgd_t in a given mm
 952 */
 953#define pgd_offset(mm, address) pgd_offset_pgd((mm)->pgd, (address))
 954/*
 955 * a shortcut which implies the use of the kernel's pgd, instead
 956 * of a process's
 957 */
 958#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
 959
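A worked example for pgd_index(), assuming the 4-level x86-64 values (PGDIR_SHIFT = 39, PTRS_PER_PGD = 512): the base of the kernel direct map lands in PGD slot 273.

#include <assert.h>

int main(void)
{
	unsigned long addr = 0xffff888000000000UL;	/* start of the direct map */

	assert(((addr >> 39) & 511) == 273);
	return 0;
}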
 960
 961#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
 962#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
 963
 964#ifndef __ASSEMBLY__
 965
 966extern int direct_gbpages;
 967void init_mem_mapping(void);
 968void early_alloc_pgt_buf(void);
 969extern void memblock_find_dma_reserve(void);
 970
 971#ifdef CONFIG_X86_64
 972/* Realmode trampoline initialization. */
 973extern pgd_t trampoline_pgd_entry;
 974static inline void __meminit init_trampoline_default(void)
 975{
 976	/* Default trampoline pgd value */
 977	trampoline_pgd_entry = init_top_pgt[pgd_index(__PAGE_OFFSET)];
 978}
 979# ifdef CONFIG_RANDOMIZE_MEMORY
 980void __meminit init_trampoline(void);
 981# else
 982#  define init_trampoline init_trampoline_default
 983# endif
 984#else
 985static inline void init_trampoline(void) { }
 986#endif
 987
 988/* local pte updates need not use xchg for locking */
 989static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 990{
 991	pte_t res = *ptep;
 992
 993	/* Pure native function needs no input for mm, addr */
 994	native_pte_clear(NULL, 0, ptep);
 995	return res;
 996}
 997
 998static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
 999{
1000	pmd_t res = *pmdp;
1001
1002	native_pmd_clear(pmdp);
1003	return res;
1004}
1005
1006static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
1007{
1008	pud_t res = *pudp;
1009
1010	native_pud_clear(pudp);
1011	return res;
1012}
1013
1014static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
1015				     pte_t *ptep, pte_t pte)
1016{
1017	native_set_pte(ptep, pte);
1018}
1019
1020static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1021			      pmd_t *pmdp, pmd_t pmd)
1022{
1023	native_set_pmd(pmdp, pmd);
1024}
1025
1026static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
1027			      pud_t *pudp, pud_t pud)
1028{
1029	native_set_pud(pudp, pud);
1030}
1031
1032/*
1033 * We only update the dirty/accessed state if we set
1034 * the dirty bit by hand in the kernel, since the hardware
1035 * will do the accessed bit for us, and we don't want to
1036 * race with other CPU's that might be updating the dirty
1037 * bit at the same time.
1038 */
1039struct vm_area_struct;
1040
1041#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1042extern int ptep_set_access_flags(struct vm_area_struct *vma,
1043				 unsigned long address, pte_t *ptep,
1044				 pte_t entry, int dirty);
1045
1046#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1047extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
1048				     unsigned long addr, pte_t *ptep);
1049
1050#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1051extern int ptep_clear_flush_young(struct vm_area_struct *vma,
1052				  unsigned long address, pte_t *ptep);
1053
1054#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1055static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1056				       pte_t *ptep)
1057{
1058	pte_t pte = native_ptep_get_and_clear(ptep);
1059	return pte;
1060}
1061
1062#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1063static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1064					    unsigned long addr, pte_t *ptep,
1065					    int full)
1066{
1067	pte_t pte;
1068	if (full) {
1069		/*
1070		 * Full address destruction in progress; paravirt does not
1071		 * care about updates and native needs no locking
1072		 */
1073		pte = native_local_ptep_get_and_clear(ptep);
1074	} else {
1075		pte = ptep_get_and_clear(mm, addr, ptep);
1076	}
1077	return pte;
1078}
1079
1080#define __HAVE_ARCH_PTEP_SET_WRPROTECT
1081static inline void ptep_set_wrprotect(struct mm_struct *mm,
1082				      unsigned long addr, pte_t *ptep)
1083{
1084	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
1085}
1086
1087#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
1088
1089#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
1090
1091#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1092extern int pmdp_set_access_flags(struct vm_area_struct *vma,
1093				 unsigned long address, pmd_t *pmdp,
1094				 pmd_t entry, int dirty);
1095extern int pudp_set_access_flags(struct vm_area_struct *vma,
1096				 unsigned long address, pud_t *pudp,
1097				 pud_t entry, int dirty);
1098
1099#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1100extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1101				     unsigned long addr, pmd_t *pmdp);
1102extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
1103				     unsigned long addr, pud_t *pudp);
1104
1105#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1106extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
1107				  unsigned long address, pmd_t *pmdp);
1108
1109
1110#define pmd_write pmd_write
1111static inline int pmd_write(pmd_t pmd)
1112{
1113	return pmd_flags(pmd) & _PAGE_RW;
1114}
1115
1116#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1117static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
1118				       pmd_t *pmdp)
1119{
1120	return native_pmdp_get_and_clear(pmdp);
1121}
1122
1123#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
1124static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
1125					unsigned long addr, pud_t *pudp)
1126{
1127	return native_pudp_get_and_clear(pudp);
1128}
1129
1130#define __HAVE_ARCH_PMDP_SET_WRPROTECT
1131static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1132				      unsigned long addr, pmd_t *pmdp)
1133{
1134	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
1135}
1136
1137#define pud_write pud_write
1138static inline int pud_write(pud_t pud)
1139{
1140	return pud_flags(pud) & _PAGE_RW;
1141}
1142
1143#ifndef pmdp_establish
1144#define pmdp_establish pmdp_establish
1145static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1146		unsigned long address, pmd_t *pmdp, pmd_t pmd)
1147{
1148	if (IS_ENABLED(CONFIG_SMP)) {
1149		return xchg(pmdp, pmd);
1150	} else {
1151		pmd_t old = *pmdp;
1152		*pmdp = pmd;
1153		return old;
1154	}
1155}
1156#endif
1157
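On SMP, pmdp_establish() uses xchg() so that installing the new entry and reading back the old one happen as a single atomic operation. The same install-and-return-old pattern in portable C11, as a standalone sketch:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

int main(void)
{
	_Atomic uint64_t pmd = 0x1000;	/* stand-in for the old entry */
	uint64_t old;

	old = atomic_exchange(&pmd, 0x2000);	/* install new, fetch old atomically */
	assert(old == 0x1000 && pmd == 0x2000);
	return 0;
}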
1158/*
1159 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
1160 *
1161 *  dst - pointer to pgd range anywhere on a pgd page
1162 *  src - ""
1163 *  count - the number of pgds to copy.
1164 *
1165 * dst and src can be on the same page, but the range must not overlap,
1166 * and must not cross a page boundary.
1167 */
1168static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
1169{
1170	memcpy(dst, src, count * sizeof(pgd_t));
1171#ifdef CONFIG_PAGE_TABLE_ISOLATION
1172	if (!static_cpu_has(X86_FEATURE_PTI))
1173		return;
1174	/* Clone the user space pgd as well */
1175	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
1176	       count * sizeof(pgd_t));
1177#endif
1178}
1179
1180#define PTE_SHIFT ilog2(PTRS_PER_PTE)
1181static inline int page_level_shift(enum pg_level level)
1182{
1183	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
1184}
1185static inline unsigned long page_level_size(enum pg_level level)
1186{
1187	return 1UL << page_level_shift(level);
1188}
1189static inline unsigned long page_level_mask(enum pg_level level)
1190{
1191	return ~(page_level_size(level) - 1);
1192}
1193
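With PTE_SHIFT = ilog2(512) = 9, page_level_shift() works out to 3 + 9 * level, which reproduces the familiar x86 page sizes (assuming PG_LEVEL_4K == 1, as in asm/pgtable_types.h):

#include <assert.h>

/* page_level_shift() with PAGE_SHIFT = 12, PTE_SHIFT = 9 */
static int toy_level_shift(int level)
{
	return (12 - 9) + level * 9;
}

int main(void)
{
	assert(toy_level_shift(1) == 12);	/* PG_LEVEL_4K -> 4 KiB */
	assert(toy_level_shift(2) == 21);	/* PG_LEVEL_2M -> 2 MiB */
	assert(toy_level_shift(3) == 30);	/* PG_LEVEL_1G -> 1 GiB */
	return 0;
}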
1194/*
1195 * The x86 doesn't have any external MMU info: the kernel page
1196 * tables contain all the necessary information.
1197 */
1198static inline void update_mmu_cache(struct vm_area_struct *vma,
1199		unsigned long addr, pte_t *ptep)
1200{
1201}
1202static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
1203		unsigned long addr, pmd_t *pmd)
1204{
1205}
1206static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
1207		unsigned long addr, pud_t *pud)
1208{
1209}
1210
1211#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1212static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1213{
1214	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1215}
1216
1217static inline int pte_swp_soft_dirty(pte_t pte)
1218{
1219	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
1220}
1221
1222static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1223{
1224	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1225}
1226
1227#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1228static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1229{
1230	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1231}
1232
1233static inline int pmd_swp_soft_dirty(pmd_t pmd)
1234{
1235	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
1236}
1237
1238static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1239{
1240	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1241}
1242#endif
1243#endif
1244
1245#define PKRU_AD_BIT 0x1
1246#define PKRU_WD_BIT 0x2
1247#define PKRU_BITS_PER_PKEY 2
1248
1249static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
1250{
1251	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
1252	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
1253}
1254
1255static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
1256{
1257	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
1258	/*
1259	 * Access-disable disables writes too so we need to check
1260	 * both bits here.
1261	 */
1262	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
1263}
1264
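PKRU packs two bits per protection key: access-disable (PKRU_AD_BIT) and write-disable (PKRU_WD_BIT), shifted by 2 * pkey. A user-space sketch of the same bit math as __pkru_allows_read()/__pkru_allows_write():

#include <assert.h>
#include <stdint.h>

#define AD 0x1
#define WD 0x2

static int allows_read(uint32_t pkru, int pkey)  { return !(pkru & (AD << (2 * pkey))); }
static int allows_write(uint32_t pkru, int pkey) { return !(pkru & ((AD | WD) << (2 * pkey))); }

int main(void)
{
	uint32_t pkru = WD << (2 * 3);	/* write-disable key 3 only */

	assert(allows_read(pkru, 3));	/* AD clear: reads still allowed */
	assert(!allows_write(pkru, 3));	/* WD set: writes denied */
	assert(allows_write(pkru, 0));	/* other keys unaffected */
	return 0;
}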
1265static inline u16 pte_flags_pkey(unsigned long pte_flags)
1266{
1267#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1268	/* ifdef to avoid doing 59-bit shift on 32-bit values */
1269	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
1270#else
1271	return 0;
1272#endif
1273}
1274
1275static inline bool __pkru_allows_pkey(u16 pkey, bool write)
1276{
1277	u32 pkru = read_pkru();
1278
1279	if (!__pkru_allows_read(pkru, pkey))
1280		return false;
1281	if (write && !__pkru_allows_write(pkru, pkey))
1282		return false;
1283
1284	return true;
1285}
1286
1287/*
1288 * 'pteval' can come from a PTE, PMD or PUD.  We only check
1289 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
1290 * same value on all 3 types.
1291 */
1292static inline bool __pte_access_permitted(unsigned long pteval, bool write)
1293{
1294	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
1295
1296	if (write)
1297		need_pte_bits |= _PAGE_RW;
1298
1299	if ((pteval & need_pte_bits) != need_pte_bits)
1300		return 0;
1301
1302	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
1303}
1304
1305#define pte_access_permitted pte_access_permitted
1306static inline bool pte_access_permitted(pte_t pte, bool write)
1307{
1308	return __pte_access_permitted(pte_val(pte), write);
1309}
1310
1311#define pmd_access_permitted pmd_access_permitted
1312static inline bool pmd_access_permitted(pmd_t pmd, bool write)
1313{
1314	return __pte_access_permitted(pmd_val(pmd), write);
1315}
1316
1317#define pud_access_permitted pud_access_permitted
1318static inline bool pud_access_permitted(pud_t pud, bool write)
1319{
1320	return __pte_access_permitted(pud_val(pud), write);
1321}
1322
1323#include <asm-generic/pgtable.h>
1324#endif	/* __ASSEMBLY__ */
1325
1326#endif /* _ASM_X86_PGTABLE_H */
v6.13.7 (arch/x86/include/asm/pgtable.h)
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_X86_PGTABLE_H
   3#define _ASM_X86_PGTABLE_H
   4
   5#include <linux/mem_encrypt.h>
   6#include <asm/page.h>
   7#include <asm/pgtable_types.h>
   8
   9/*
  10 * Macro to mark a page protection value as UC-
  11 */
  12#define pgprot_noncached(prot)						\
  13	((boot_cpu_data.x86 > 3)					\
  14	 ? (__pgprot(pgprot_val(prot) |					\
  15		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
  16	 : (prot))
  17
  18#ifndef __ASSEMBLY__
  19#include <linux/spinlock.h>
  20#include <asm/x86_init.h>
  21#include <asm/pkru.h>
  22#include <asm/fpu/api.h>
  23#include <asm/coco.h>
  24#include <asm-generic/pgtable_uffd.h>
  25#include <linux/page_table_check.h>
  26
  27extern pgd_t early_top_pgt[PTRS_PER_PGD];
  28bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
  29
  30struct seq_file;
  31void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
  32void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
  33				   bool user);
  34bool ptdump_walk_pgd_level_checkwx(void);
  35#define ptdump_check_wx ptdump_walk_pgd_level_checkwx
  36void ptdump_walk_user_pgd_level_checkwx(void);
  37
  38/*
  39 * Macros to add or remove encryption attribute
  40 */
  41#define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
  42#define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))
  43
  44#ifdef CONFIG_DEBUG_WX
  45#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
  46#else
  47#define debug_checkwx_user()	do { } while (0)
  48#endif
  49
  50/*
  51 * ZERO_PAGE is a global shared page that is always zero: used
  52 * for zero-mapped memory areas etc..
  53 */
  54extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
  55	__visible;
  56#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
  57
  58extern spinlock_t pgd_lock;
  59extern struct list_head pgd_list;
  60
  61extern struct mm_struct *pgd_page_get_mm(struct page *page);
  62
  63extern pmdval_t early_pmd_flags;
  64
  65#ifdef CONFIG_PARAVIRT_XXL
  66#include <asm/paravirt.h>
  67#else  /* !CONFIG_PARAVIRT_XXL */
  68#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
  69
  70#define set_pte_atomic(ptep, pte)					\
  71	native_set_pte_atomic(ptep, pte)
  72
  73#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
  74
  75#ifndef __PAGETABLE_P4D_FOLDED
  76#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
  77#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
  78#endif
  79
  80#ifndef set_p4d
  81# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
  82#endif
  83
  84#ifndef __PAGETABLE_PUD_FOLDED
  85#define p4d_clear(p4d)			native_p4d_clear(p4d)
  86#endif
  87
  88#ifndef set_pud
  89# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
  90#endif
  91
  92#ifndef __PAGETABLE_PUD_FOLDED
  93#define pud_clear(pud)			native_pud_clear(pud)
  94#endif
  95
  96#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
  97#define pmd_clear(pmd)			native_pmd_clear(pmd)
  98
  99#define pgd_val(x)	native_pgd_val(x)
 100#define __pgd(x)	native_make_pgd(x)
 101
 102#ifndef __PAGETABLE_P4D_FOLDED
 103#define p4d_val(x)	native_p4d_val(x)
 104#define __p4d(x)	native_make_p4d(x)
 105#endif
 106
 107#ifndef __PAGETABLE_PUD_FOLDED
 108#define pud_val(x)	native_pud_val(x)
 109#define __pud(x)	native_make_pud(x)
 110#endif
 111
 112#ifndef __PAGETABLE_PMD_FOLDED
 113#define pmd_val(x)	native_pmd_val(x)
 114#define __pmd(x)	native_make_pmd(x)
 115#endif
 116
 117#define pte_val(x)	native_pte_val(x)
 118#define __pte(x)	native_make_pte(x)
 119
 120#define arch_end_context_switch(prev)	do {} while(0)
 121#endif	/* CONFIG_PARAVIRT_XXL */
 122
 123static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 124{
 125	pmdval_t v = native_pmd_val(pmd);
 126
 127	return native_make_pmd(v | set);
 128}
 129
 130static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
 131{
 132	pmdval_t v = native_pmd_val(pmd);
 133
 134	return native_make_pmd(v & ~clear);
 135}
 136
 137static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
 138{
 139	pudval_t v = native_pud_val(pud);
 140
 141	return native_make_pud(v | set);
 142}
 143
 144static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
 145{
 146	pudval_t v = native_pud_val(pud);
 147
 148	return native_make_pud(v & ~clear);
 149}
 150
 151/*
 152 * The following only work if pte_present() is true.
 153 * Undefined behaviour if not..
 154 */
 155static inline bool pte_dirty(pte_t pte)
 156{
 157	return pte_flags(pte) & _PAGE_DIRTY_BITS;
 158}
 159
 160static inline bool pte_shstk(pte_t pte)
 161{
 162	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
 163	       (pte_flags(pte) & (_PAGE_RW | _PAGE_DIRTY)) == _PAGE_DIRTY;
 164}
 165
 166static inline int pte_young(pte_t pte)
 167{
 168	return pte_flags(pte) & _PAGE_ACCESSED;
 169}
 170
 171static inline bool pte_decrypted(pte_t pte)
 172{
 173	return cc_mkdec(pte_val(pte)) == pte_val(pte);
 174}
 175
 176#define pmd_dirty pmd_dirty
 177static inline bool pmd_dirty(pmd_t pmd)
 178{
 179	return pmd_flags(pmd) & _PAGE_DIRTY_BITS;
 180}
 181
 182static inline bool pmd_shstk(pmd_t pmd)
 183{
 184	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
 185	       (pmd_flags(pmd) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
 186	       (_PAGE_DIRTY | _PAGE_PSE);
 187}
 188
 189#define pmd_young pmd_young
 190static inline int pmd_young(pmd_t pmd)
 191{
 192	return pmd_flags(pmd) & _PAGE_ACCESSED;
 193}
 194
 195static inline bool pud_dirty(pud_t pud)
 196{
 197	return pud_flags(pud) & _PAGE_DIRTY_BITS;
 198}
 199
 200static inline int pud_young(pud_t pud)
 201{
 202	return pud_flags(pud) & _PAGE_ACCESSED;
 203}
 204
 205static inline bool pud_shstk(pud_t pud)
 206{
 207	return cpu_feature_enabled(X86_FEATURE_SHSTK) &&
 208	       (pud_flags(pud) & (_PAGE_RW | _PAGE_DIRTY | _PAGE_PSE)) ==
 209	       (_PAGE_DIRTY | _PAGE_PSE);
 210}
 211
 212static inline int pte_write(pte_t pte)
 213{
 214	/*
 215	 * Shadow stack pages are logically writable, but do not have
 216	 * _PAGE_RW.  Check for them separately from _PAGE_RW itself.
 217	 */
 218	return (pte_flags(pte) & _PAGE_RW) || pte_shstk(pte);
 219}
 220
 221#define pmd_write pmd_write
 222static inline int pmd_write(pmd_t pmd)
 223{
 224	/*
 225	 * Shadow stack pages are logically writable, but do not have
 226	 * _PAGE_RW.  Check for them separately from _PAGE_RW itself.
 227	 */
 228	return (pmd_flags(pmd) & _PAGE_RW) || pmd_shstk(pmd);
 229}
 230
 231#define pud_write pud_write
 232static inline int pud_write(pud_t pud)
 233{
 234	return pud_flags(pud) & _PAGE_RW;
 235}
 236
 237static inline int pte_huge(pte_t pte)
 238{
 239	return pte_flags(pte) & _PAGE_PSE;
 240}
 241
 242static inline int pte_global(pte_t pte)
 243{
 244	return pte_flags(pte) & _PAGE_GLOBAL;
 245}
 246
 247static inline int pte_exec(pte_t pte)
 248{
 249	return !(pte_flags(pte) & _PAGE_NX);
 250}
 251
 252static inline int pte_special(pte_t pte)
 253{
 254	return pte_flags(pte) & _PAGE_SPECIAL;
 255}
 256
 257/* Entries that were set to PROT_NONE are inverted */
 258
 259static inline u64 protnone_mask(u64 val);
 260
 261#define PFN_PTE_SHIFT	PAGE_SHIFT
 262
 263static inline unsigned long pte_pfn(pte_t pte)
 264{
 265	phys_addr_t pfn = pte_val(pte);
 266	pfn ^= protnone_mask(pfn);
 267	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
 268}
 269
 270static inline unsigned long pmd_pfn(pmd_t pmd)
 271{
 272	phys_addr_t pfn = pmd_val(pmd);
 273	pfn ^= protnone_mask(pfn);
 274	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
 275}
 276
 277#define pud_pfn pud_pfn
 278static inline unsigned long pud_pfn(pud_t pud)
 279{
 280	phys_addr_t pfn = pud_val(pud);
 281	pfn ^= protnone_mask(pfn);
 282	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
 283}
 284
 285static inline unsigned long p4d_pfn(p4d_t p4d)
 286{
 287	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
 288}
 289
 290static inline unsigned long pgd_pfn(pgd_t pgd)
 291{
 292	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 293}
 294
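The protnone_mask() XOR above implements the PROT_NONE entry inversion (part of the L1TF mitigation): a non-present entry stores its PFN bits inverted, and XOR-ing with the same mask on the way out recovers them. A sketch of the round trip, with a stand-in value for the real mask:

#include <assert.h>
#include <stdint.h>

#define TOY_MASK 0x000ffffffffff000ULL	/* stand-in for the inversion mask */

int main(void)
{
	uint64_t pfn_bits = 0x1234a5000ULL;
	uint64_t stored   = pfn_bits ^ TOY_MASK;	/* inverted on store */

	assert((stored ^ TOY_MASK) == pfn_bits);	/* recovered on load */
	return 0;
}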
 295#define p4d_leaf p4d_leaf
 296static inline bool p4d_leaf(p4d_t p4d)
 297{
 298	/* No 512 GiB pages yet */
 299	return 0;
 300}
 301
 302#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 303
 304#define pmd_leaf pmd_leaf
 305static inline bool pmd_leaf(pmd_t pte)
 306{
 307	return pmd_flags(pte) & _PAGE_PSE;
 308}
 309
 310#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 311/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_leaf */
 312static inline int pmd_trans_huge(pmd_t pmd)
 313{
 314	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 315}
 316
 317#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 318static inline int pud_trans_huge(pud_t pud)
 319{
 320	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 321}
 322#endif
 323
 324#define has_transparent_hugepage has_transparent_hugepage
 325static inline int has_transparent_hugepage(void)
 326{
 327	return boot_cpu_has(X86_FEATURE_PSE);
 328}
 329
 330#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
 331static inline int pmd_devmap(pmd_t pmd)
 332{
 333	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
 334}
 335
 336#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 337static inline int pud_devmap(pud_t pud)
 338{
 339	return !!(pud_val(pud) & _PAGE_DEVMAP);
 340}
 341#else
 342static inline int pud_devmap(pud_t pud)
 343{
 344	return 0;
 345}
 346#endif
 347
 348#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
 349static inline bool pmd_special(pmd_t pmd)
 350{
 351	return pmd_flags(pmd) & _PAGE_SPECIAL;
 352}
 353
 354static inline pmd_t pmd_mkspecial(pmd_t pmd)
 355{
 356	return pmd_set_flags(pmd, _PAGE_SPECIAL);
 357}
 358#endif	/* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */
 359
 360#ifdef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP
 361static inline bool pud_special(pud_t pud)
 362{
 363	return pud_flags(pud) & _PAGE_SPECIAL;
 364}
 365
 366static inline pud_t pud_mkspecial(pud_t pud)
 367{
 368	return pud_set_flags(pud, _PAGE_SPECIAL);
 369}
 370#endif	/* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
 371
 372static inline int pgd_devmap(pgd_t pgd)
 373{
 374	return 0;
 375}
 376#endif
 377#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 378
 379static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 380{
 381	pteval_t v = native_pte_val(pte);
 382
 383	return native_make_pte(v | set);
 384}
 385
 386static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
 387{
 388	pteval_t v = native_pte_val(pte);
 389
 390	return native_make_pte(v & ~clear);
 391}
 392
 393/*
 394 * Write protection operations can result in Dirty=1,Write=0 PTEs. But in the
 395 * case of X86_FEATURE_USER_SHSTK, these PTEs denote shadow stack memory. So
 396 * when creating dirty, write-protected memory, a software bit is used:
 397 * _PAGE_BIT_SAVED_DIRTY. The following functions take a PTE and transition the
 398 * Dirty bit to SavedDirty, and vice-versa.
 399 *
 400 * This shifting is only done if needed. In the case of shifting
 401 * Dirty->SavedDirty, the condition is if the PTE is Write=0. In the case of
 402 * shifting SavedDirty->Dirty, the condition is Write=1.
 403 */
 404static inline pgprotval_t mksaveddirty_shift(pgprotval_t v)
 405{
 406	pgprotval_t cond = (~v >> _PAGE_BIT_RW) & 1;
 407
 408	v |= ((v >> _PAGE_BIT_DIRTY) & cond) << _PAGE_BIT_SAVED_DIRTY;
 409	v &= ~(cond << _PAGE_BIT_DIRTY);
 410
 411	return v;
 412}
 413
 414static inline pgprotval_t clear_saveddirty_shift(pgprotval_t v)
 415{
 416	pgprotval_t cond = (v >> _PAGE_BIT_RW) & 1;
 417
 418	v |= ((v >> _PAGE_BIT_SAVED_DIRTY) & cond) << _PAGE_BIT_DIRTY;
 419	v &= ~(cond << _PAGE_BIT_SAVED_DIRTY);
 420
 421	return v;
 422}
 423
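To see mksaveddirty_shift() at work: when Write=0, the hardware Dirty bit migrates into the software SavedDirty bit; a writable entry passes through unchanged. A user-space sketch with stand-in bit positions (see asm/pgtable_types.h for the real ones):

#include <assert.h>
#include <stdint.h>

#define BIT_RW		1
#define BIT_DIRTY	6
#define BIT_SAVED	58	/* stand-in for _PAGE_BIT_SAVED_DIRTY */

static uint64_t toy_mksaveddirty(uint64_t v)
{
	uint64_t cond = (~v >> BIT_RW) & 1;	/* 1 iff Write=0 */

	v |= ((v >> BIT_DIRTY) & cond) << BIT_SAVED;
	v &= ~(cond << BIT_DIRTY);
	return v;
}

int main(void)
{
	uint64_t wp_dirty = 1ULL << BIT_DIRTY;			/* Write=0, Dirty=1 */
	uint64_t rw_dirty = (1ULL << BIT_RW) | (1ULL << BIT_DIRTY);

	assert(toy_mksaveddirty(wp_dirty) == 1ULL << BIT_SAVED);	/* moved */
	assert(toy_mksaveddirty(rw_dirty) == rw_dirty);			/* untouched */
	return 0;
}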
 424static inline pte_t pte_mksaveddirty(pte_t pte)
 425{
 426	pteval_t v = native_pte_val(pte);
 427
 428	v = mksaveddirty_shift(v);
 429	return native_make_pte(v);
 430}
 431
 432static inline pte_t pte_clear_saveddirty(pte_t pte)
 433{
 434	pteval_t v = native_pte_val(pte);
 435
 436	v = clear_saveddirty_shift(v);
 437	return native_make_pte(v);
 438}
 439
 440static inline pte_t pte_wrprotect(pte_t pte)
 441{
 442	pte = pte_clear_flags(pte, _PAGE_RW);
 443
 444	/*
 445	 * Blindly clearing _PAGE_RW might accidentally create
 446	 * a shadow stack PTE (Write=0,Dirty=1). Move the hardware
 447	 * dirty value to the software bit, if present.
 448	 */
 449	return pte_mksaveddirty(pte);
 450}
 451
 452#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
 453static inline int pte_uffd_wp(pte_t pte)
 454{
 455	return pte_flags(pte) & _PAGE_UFFD_WP;
 456}
 457
 458static inline pte_t pte_mkuffd_wp(pte_t pte)
 459{
 460	return pte_wrprotect(pte_set_flags(pte, _PAGE_UFFD_WP));
 461}
 462
 463static inline pte_t pte_clear_uffd_wp(pte_t pte)
 464{
 465	return pte_clear_flags(pte, _PAGE_UFFD_WP);
 466}
 467#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
 468
 469static inline pte_t pte_mkclean(pte_t pte)
 470{
 471	return pte_clear_flags(pte, _PAGE_DIRTY_BITS);
 472}
 473
 474static inline pte_t pte_mkold(pte_t pte)
 475{
 476	return pte_clear_flags(pte, _PAGE_ACCESSED);
 477}
 478
 479static inline pte_t pte_mkexec(pte_t pte)
 480{
 481	return pte_clear_flags(pte, _PAGE_NX);
 482}
 483
 484static inline pte_t pte_mkdirty(pte_t pte)
 485{
 486	pte = pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 487
 488	return pte_mksaveddirty(pte);
 489}
 490
 491static inline pte_t pte_mkwrite_shstk(pte_t pte)
 492{
 493	pte = pte_clear_flags(pte, _PAGE_RW);
 494
 495	return pte_set_flags(pte, _PAGE_DIRTY);
 496}
 497
 498static inline pte_t pte_mkyoung(pte_t pte)
 499{
 500	return pte_set_flags(pte, _PAGE_ACCESSED);
 501}
 502
 503static inline pte_t pte_mkwrite_novma(pte_t pte)
 504{
 505	return pte_set_flags(pte, _PAGE_RW);
 506}
 507
 508struct vm_area_struct;
 509pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma);
 510#define pte_mkwrite pte_mkwrite
 511
 512static inline pte_t pte_mkhuge(pte_t pte)
 513{
 514	return pte_set_flags(pte, _PAGE_PSE);
 515}
 516
 517static inline pte_t pte_clrhuge(pte_t pte)
 518{
 519	return pte_clear_flags(pte, _PAGE_PSE);
 520}
 521
 522static inline pte_t pte_mkglobal(pte_t pte)
 523{
 524	return pte_set_flags(pte, _PAGE_GLOBAL);
 525}
 526
 527static inline pte_t pte_clrglobal(pte_t pte)
 528{
 529	return pte_clear_flags(pte, _PAGE_GLOBAL);
 530}
 531
 532static inline pte_t pte_mkspecial(pte_t pte)
 533{
 534	return pte_set_flags(pte, _PAGE_SPECIAL);
 535}
 536
 537static inline pte_t pte_mkdevmap(pte_t pte)
 538{
 539	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
 540}
 541
 542/* See comments above mksaveddirty_shift() */
 543static inline pmd_t pmd_mksaveddirty(pmd_t pmd)
 544{
 545	pmdval_t v = native_pmd_val(pmd);
 546
 547	v = mksaveddirty_shift(v);
 548	return native_make_pmd(v);
 549}
 550
 551/* See comments above mksaveddirty_shift() */
 552static inline pmd_t pmd_clear_saveddirty(pmd_t pmd)
 553{
 554	pmdval_t v = native_pmd_val(pmd);
 555
 556	v = clear_saveddirty_shift(v);
 557	return native_make_pmd(v);
 558}
 559
 560static inline pmd_t pmd_wrprotect(pmd_t pmd)
 561{
 562	pmd = pmd_clear_flags(pmd, _PAGE_RW);
 563
 564	/*
 565	 * Blindly clearing _PAGE_RW might accidentally create
 566	 * a shadow stack PMD (RW=0, Dirty=1). Move the hardware
 567	 * dirty value to the software bit.
 568	 */
 569	return pmd_mksaveddirty(pmd);
 570}
 571
 572#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
 573static inline int pmd_uffd_wp(pmd_t pmd)
 574{
 575	return pmd_flags(pmd) & _PAGE_UFFD_WP;
 576}
 577
 578static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
 579{
 580	return pmd_wrprotect(pmd_set_flags(pmd, _PAGE_UFFD_WP));
 581}
 582
 583static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
 584{
 585	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
 586}
 587#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
 588
 589static inline pmd_t pmd_mkold(pmd_t pmd)
 590{
 591	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
 592}
 593
 594static inline pmd_t pmd_mkclean(pmd_t pmd)
 595{
 596	return pmd_clear_flags(pmd, _PAGE_DIRTY_BITS);
 597}
 598
 599static inline pmd_t pmd_mkdirty(pmd_t pmd)
 600{
 601	pmd = pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 602
 603	return pmd_mksaveddirty(pmd);
 604}
 605
 606static inline pmd_t pmd_mkwrite_shstk(pmd_t pmd)
 607{
 608	pmd = pmd_clear_flags(pmd, _PAGE_RW);
 609
 610	return pmd_set_flags(pmd, _PAGE_DIRTY);
 611}
 612
 613static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 614{
 615	return pmd_set_flags(pmd, _PAGE_DEVMAP);
 616}
 617
 618static inline pmd_t pmd_mkhuge(pmd_t pmd)
 619{
 620	return pmd_set_flags(pmd, _PAGE_PSE);
 621}
 622
 623static inline pmd_t pmd_mkyoung(pmd_t pmd)
 624{
 625	return pmd_set_flags(pmd, _PAGE_ACCESSED);
 626}
 627
 628static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
 629{
 630	return pmd_set_flags(pmd, _PAGE_RW);
 631}
 632
 633pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 634#define pmd_mkwrite pmd_mkwrite
 635
 636/* See comments above mksaveddirty_shift() */
 637static inline pud_t pud_mksaveddirty(pud_t pud)
 638{
 639	pudval_t v = native_pud_val(pud);
 640
 641	v = mksaveddirty_shift(v);
 642	return native_make_pud(v);
 643}
 644
 645/* See comments above mksaveddirty_shift() */
 646static inline pud_t pud_clear_saveddirty(pud_t pud)
 647{
 648	pudval_t v = native_pud_val(pud);
 649
 650	v = clear_saveddirty_shift(v);
 651	return native_make_pud(v);
 652}
 653
 654static inline pud_t pud_mkold(pud_t pud)
 655{
 656	return pud_clear_flags(pud, _PAGE_ACCESSED);
 657}
 658
 659static inline pud_t pud_mkclean(pud_t pud)
 660{
 661	return pud_clear_flags(pud, _PAGE_DIRTY_BITS);
 662}
 663
 664static inline pud_t pud_wrprotect(pud_t pud)
 665{
 666	pud = pud_clear_flags(pud, _PAGE_RW);
 667
 668	/*
 669	 * Blindly clearing _PAGE_RW might accidentally create
 670	 * a shadow stack PUD (RW=0, Dirty=1). Move the hardware
 671	 * dirty value to the software bit.
 672	 */
 673	return pud_mksaveddirty(pud);
 674}
 675
 676static inline pud_t pud_mkdirty(pud_t pud)
 677{
 678	pud = pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 679
 680	return pud_mksaveddirty(pud);
 681}
 682
 683static inline pud_t pud_mkdevmap(pud_t pud)
 684{
 685	return pud_set_flags(pud, _PAGE_DEVMAP);
 686}
 687
 688static inline pud_t pud_mkhuge(pud_t pud)
 689{
 690	return pud_set_flags(pud, _PAGE_PSE);
 691}
 692
 693static inline pud_t pud_mkyoung(pud_t pud)
 694{
 695	return pud_set_flags(pud, _PAGE_ACCESSED);
 696}
 697
 698static inline pud_t pud_mkwrite(pud_t pud)
 699{
 700	pud = pud_set_flags(pud, _PAGE_RW);
 701
 702	return pud_clear_saveddirty(pud);
 703}
 704
 705#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 706static inline int pte_soft_dirty(pte_t pte)
 707{
 708	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
 709}
 710
 711static inline int pmd_soft_dirty(pmd_t pmd)
 712{
 713	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
 714}
 715
 716static inline int pud_soft_dirty(pud_t pud)
 717{
 718	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
 719}
 720
 721static inline pte_t pte_mksoft_dirty(pte_t pte)
 722{
 723	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
 724}
 725
 726static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 727{
 728	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 729}
 730
 731static inline pud_t pud_mksoft_dirty(pud_t pud)
 732{
 733	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
 734}
 735
 736static inline pte_t pte_clear_soft_dirty(pte_t pte)
 737{
 738	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
 739}
 740
 741static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 742{
 743	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
 744}
 745
 746static inline pud_t pud_clear_soft_dirty(pud_t pud)
 747{
 748	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
 749}
 750
 751#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 752
 753/*
 754 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 755 * can use those bits for other purposes, so leave them be.
 756 */
 757static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
 758{
 759	pgprotval_t protval = pgprot_val(pgprot);
 760
 761	if (protval & _PAGE_PRESENT)
 762		protval &= __supported_pte_mask;
 763
 764	return protval;
 765}
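/*
 * Example (illustrative): on a CPU without NX, _PAGE_NX is absent from
 * __supported_pte_mask, so a present pgprot carrying it is quietly
 * trimmed:
 *
 *	pgprot_t prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_NX);
 *	pgprotval_t val = massage_pgprot(prot);	// NX dropped if unsupported
 *
 * Non-present pgprots pass through untouched, since swap entries and
 * PROT_NONE markers reuse those bits for other purposes.
 */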
 766
 767static inline pgprotval_t check_pgprot(pgprot_t pgprot)
 768{
 769	pgprotval_t massaged_val = massage_pgprot(pgprot);
 770
 771	/* mmdebug.h cannot be included here because of dependencies */
 772#ifdef CONFIG_DEBUG_VM
 773	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
 774		  "attempted to set unsupported pgprot: %016llx "
 775		  "bits: %016llx supported: %016llx\n",
 776		  (u64)pgprot_val(pgprot),
 777		  (u64)pgprot_val(pgprot) ^ massaged_val,
 778		  (u64)__supported_pte_mask);
 779#endif
 780
 781	return massaged_val;
 782}
 783
 784static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 785{
 786	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
 787	pfn ^= protnone_mask(pgprot_val(pgprot));
 788	pfn &= PTE_PFN_MASK;
 789	return __pte(pfn | check_pgprot(pgprot));
 790}
 791
 792static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 793{
 794	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
 795	pfn ^= protnone_mask(pgprot_val(pgprot));
 796	pfn &= PHYSICAL_PMD_PAGE_MASK;
 797	return __pmd(pfn | check_pgprot(pgprot));
 798}
 799
 800static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
 801{
 802	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
 803	pfn ^= protnone_mask(pgprot_val(pgprot));
 804	pfn &= PHYSICAL_PUD_PAGE_MASK;
 805	return __pud(pfn | check_pgprot(pgprot));
 806}
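/*
 * Usage sketch for the pfn_p*() constructors (PAGE_KERNEL comes from
 * pgtable_types.h; pfn, mm, addr and ptep are assumed valid):
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * The protnone_mask() XOR un-inverts the inverted pfn stored in
 * PROT_NONE entries, part of the L1TF mitigation.
 */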
 807
 808static inline pmd_t pmd_mkinvalid(pmd_t pmd)
 809{
 810	return pfn_pmd(pmd_pfn(pmd),
 811		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
 812}
 813
 814static inline pud_t pud_mkinvalid(pud_t pud)
 815{
 816	return pfn_pud(pud_pfn(pud),
 817		       __pgprot(pud_flags(pud) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
 818}
 819
 820static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
 821
 822static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 823{
 824	pteval_t val = pte_val(pte), oldval = val;
 825	pte_t pte_result;
 826
 827	/*
 828	 * Chop off the NX bit (if present), and add the NX portion of
 829	 * the newprot (if present):
 830	 */
 831	val &= _PAGE_CHG_MASK;
 832	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
 833	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
 834
 835	pte_result = __pte(val);
 836
 837	/*
 838	 * To avoid creating Write=0,Dirty=1 PTEs, pte_modify() needs to avoid:
 839	 *  1. Marking Write=0 PTEs Dirty=1
 840	 *  2. Marking Dirty=1 PTEs Write=0
 841	 *
 842	 * The first case cannot happen because the _PAGE_CHG_MASK will filter
 843	 * out any Dirty bit passed in newprot. Handle the second case by
 844	 * going through the mksaveddirty exercise. Only do this if the old
 845	 * value was Write=1 to avoid doing this on Shadow Stack PTEs.
 846	 */
 847	if (oldval & _PAGE_RW)
 848		pte_result = pte_mksaveddirty(pte_result);
 849	else
 850		pte_result = pte_clear_saveddirty(pte_result);
 851
 852	return pte_result;
 853}
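/*
 * Sketch of the typical pte_modify() caller (simplified from the
 * mprotect() path):
 *
 *	pte_t old = ptep_get(ptep);
 *	pte_t new = pte_modify(old, vma->vm_page_prot);
 *
 * The pfn and the _PAGE_CHG_MASK bits of 'old' survive; the remaining
 * protection bits follow newprot, with the Write/Dirty rules above
 * applied on top.
 */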
 854
 855static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 856{
 857	pmdval_t val = pmd_val(pmd), oldval = val;
 858	pmd_t pmd_result;
 859
 860	val &= (_HPAGE_CHG_MASK & ~_PAGE_DIRTY);
 861	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
 862	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
 863
 864	pmd_result = __pmd(val);
 865
 866	/*
 867	 * Avoid creating shadow stack PMD by accident.  See comment in
 868	 * pte_modify().
 869	 */
 870	if (oldval & _PAGE_RW)
 871		pmd_result = pmd_mksaveddirty(pmd_result);
 872	else
 873		pmd_result = pmd_clear_saveddirty(pmd_result);
 874
 875	return pmd_result;
 876}
 877
 878static inline pud_t pud_modify(pud_t pud, pgprot_t newprot)
 879{
 880	pudval_t val = pud_val(pud), oldval = val;
 881	pud_t pud_result;
 882
 883	val &= _HPAGE_CHG_MASK;
 884	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
 885	val = flip_protnone_guard(oldval, val, PHYSICAL_PUD_PAGE_MASK);
 886
 887	pud_result = __pud(val);
 888
 889	/*
 890	 * Avoid creating shadow stack PUD by accident.  See comment in
 891	 * pte_modify().
 892	 */
 893	if (oldval & _PAGE_RW)
 894		pud_result = pud_mksaveddirty(pud_result);
 895	else
 896		pud_result = pud_clear_saveddirty(pud_result);
 897
 898	return pud_result;
 899}
 900
 901/*
 902 * mprotect needs to preserve PAT and encryption bits when updating
 903 * vm_page_prot
 904 */
 905#define pgprot_modify pgprot_modify
 906static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 907{
 908	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
 909	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
 910	return __pgprot(preservebits | addbits);
 911}
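/*
 * Example (illustrative, simplified from mm/mprotect.c): changing a VMA's
 * protection keeps the PAT and encryption bits of the old prot:
 *
 *	vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
 *					  vm_get_page_prot(newflags));
 */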
 912
 913#define pte_pgprot(x) __pgprot(pte_flags(x))
 914#define pmd_pgprot(x) __pgprot(pmd_flags(x))
 915#define pud_pgprot(x) __pgprot(pud_flags(x))
 916#define p4d_pgprot(x) __pgprot(p4d_flags(x))
 917
 918#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 919
 920static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 921					 enum page_cache_mode pcm,
 922					 enum page_cache_mode new_pcm)
 923{
 924	/*
 925	 * PAT type is always WB for untracked ranges, so no need to check.
 926	 */
 927	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
 928		return 1;
 929
 930	/*
 931	 * Certain new memtypes are not allowed with certain
 932	 * requested memtype:
 933	 * - request is uncached, return cannot be write-back
 934	 * - request is write-combine, return cannot be write-back
 935	 * - request is write-through, return cannot be write-back
 936	 * - request is write-through, return cannot be write-combine
 937	 */
 938	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
 939	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 940	    (pcm == _PAGE_CACHE_MODE_WC &&
 941	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 942	    (pcm == _PAGE_CACHE_MODE_WT &&
 943	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 944	    (pcm == _PAGE_CACHE_MODE_WT &&
 945	     new_pcm == _PAGE_CACHE_MODE_WC)) {
 946		return 0;
 947	}
 948
 949	return 1;
 950}
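/*
 * Example (illustrative): a range tracked as write-combining may be
 * re-requested as uncached, but not as write-back:
 *
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_UC_MINUS);	// returns 1
 *	is_new_memtype_allowed(paddr, size, _PAGE_CACHE_MODE_WC,
 *			       _PAGE_CACHE_MODE_WB);		// returns 0
 */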
 951
 952pmd_t *populate_extra_pmd(unsigned long vaddr);
 953pte_t *populate_extra_pte(unsigned long vaddr);
 954
 955#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
 956pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
 957
 958/*
 959 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 960 * Populates the user and returns the resulting PGD that must be set in
 961 * the kernel copy of the page tables.
 962 */
 963static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 964{
 965	if (!static_cpu_has(X86_FEATURE_PTI))
 966		return pgd;
 967	return __pti_set_user_pgtbl(pgdp, pgd);
 968}
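/*
 * Every PGD write is expected to funnel through this helper; a sketch
 * mirroring native_set_pgd() in pgtable_64.h:
 *
 *	WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 */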
 969#else   /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
 970static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 971{
 972	return pgd;
 973}
 974#endif  /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
 975
 976#endif	/* __ASSEMBLY__ */
 977
 978
 979#ifdef CONFIG_X86_32
 980# include <asm/pgtable_32.h>
 981#else
 982# include <asm/pgtable_64.h>
 983#endif
 984
 985#ifndef __ASSEMBLY__
 986#include <linux/mm_types.h>
 987#include <linux/mmdebug.h>
 988#include <linux/log2.h>
 989#include <asm/fixmap.h>
 990
 991static inline int pte_none(pte_t pte)
 992{
 993	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
 994}
 995
 996#define __HAVE_ARCH_PTE_SAME
 997static inline int pte_same(pte_t a, pte_t b)
 998{
 999	return a.pte == b.pte;
1000}
1001
1002static inline pte_t pte_advance_pfn(pte_t pte, unsigned long nr)
1003{
1004	if (__pte_needs_invert(pte_val(pte)))
1005		return __pte(pte_val(pte) - (nr << PFN_PTE_SHIFT));
1006	return __pte(pte_val(pte) + (nr << PFN_PTE_SHIFT));
1007}
1008#define pte_advance_pfn	pte_advance_pfn
1009
1010static inline int pte_present(pte_t a)
1011{
1012	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
1013}
1014
1015#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
1016static inline int pte_devmap(pte_t a)
1017{
1018	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
1019}
1020#endif
1021
1022#define pte_accessible pte_accessible
1023static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
1024{
1025	if (pte_flags(a) & _PAGE_PRESENT)
1026		return true;
1027
1028	if ((pte_flags(a) & _PAGE_PROTNONE) &&
1029			atomic_read(&mm->tlb_flush_pending))
1030		return true;
1031
1032	return false;
1033}
1034
1035static inline int pmd_present(pmd_t pmd)
1036{
1037	/*
1038	 * Checking for _PAGE_PSE is needed too because
1039	 * split_huge_page will temporarily clear the present bit (but
1040	 * the _PAGE_PSE flag will remain set at all times while the
1041	 * _PAGE_PRESENT bit is clear).
1042	 */
1043	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
1044}
1045
1046#ifdef CONFIG_NUMA_BALANCING
1047/*
1048 * These work without NUMA balancing but the kernel does not care. See the
1049 * comment in include/linux/pgtable.h
1050 */
1051static inline int pte_protnone(pte_t pte)
1052{
1053	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
1054		== _PAGE_PROTNONE;
1055}
1056
1057static inline int pmd_protnone(pmd_t pmd)
1058{
1059	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
1060		== _PAGE_PROTNONE;
1061}
1062#endif /* CONFIG_NUMA_BALANCING */
1063
1064static inline int pmd_none(pmd_t pmd)
1065{
1066	/* Only check low word on 32-bit platforms, since it might be
1067	   out of sync with upper half. */
1068	unsigned long val = native_pmd_val(pmd);
1069	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
1070}
1071
1072static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1073{
1074	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
1075}
1076
1077/*
1078 * Currently stuck as a macro due to indirect forward reference to
1079 * linux/mmzone.h's __section_mem_map_addr() definition:
1080 */
1081#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
1082
1083/*
1084 * Conversion functions: convert a page and protection to a page entry,
1085 * and a page entry and page directory to the page they refer to.
1086 *
1087 * (Currently stuck as a macro because of indirect forward reference
1088 * to linux/mm.h:page_to_nid())
1089 */
1090#define mk_pte(page, pgprot)						  \
1091({									  \
1092	pgprot_t __pgprot = pgprot;					  \
1093									  \
1094	WARN_ON_ONCE((pgprot_val(__pgprot) & (_PAGE_DIRTY | _PAGE_RW)) == \
1095		    _PAGE_DIRTY);					  \
1096	pfn_pte(page_to_pfn(page), __pgprot);				  \
1097})
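/*
 * Typical use (sketch): the fault path builds a PTE straight from the
 * VMA's protection, which is what the WARN_ON_ONCE() above sanity-checks:
 *
 *	pte_t pte = mk_pte(page, vma->vm_page_prot);
 */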
1098
1099static inline int pmd_bad(pmd_t pmd)
1100{
1101	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
1102	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
1103}
1104
1105static inline unsigned long pages_to_mb(unsigned long npg)
1106{
1107	return npg >> (20 - PAGE_SHIFT);
1108}
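/*
 * With 4K pages (PAGE_SHIFT == 12) this is npg >> 8:
 * pages_to_mb(256) == 1, pages_to_mb(262144) == 1024.
 */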
1109
1110#if CONFIG_PGTABLE_LEVELS > 2
1111static inline int pud_none(pud_t pud)
1112{
1113	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
1114}
1115
1116static inline int pud_present(pud_t pud)
1117{
1118	return pud_flags(pud) & _PAGE_PRESENT;
1119}
1120
1121static inline pmd_t *pud_pgtable(pud_t pud)
1122{
1123	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
1124}
1125
1126/*
1127 * Currently stuck as a macro due to indirect forward reference to
1128 * linux/mmzone.h's __section_mem_map_addr() definition:
1129 */
1130#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
1131
1132#define pud_leaf pud_leaf
1133static inline bool pud_leaf(pud_t pud)
1134{
1135	return pud_val(pud) & _PAGE_PSE;
1136}
1137
1138static inline int pud_bad(pud_t pud)
1139{
1140	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
1141}
1142#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
1143
1144#if CONFIG_PGTABLE_LEVELS > 3
1145static inline int p4d_none(p4d_t p4d)
1146{
1147	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
1148}
1149
1150static inline int p4d_present(p4d_t p4d)
1151{
1152	return p4d_flags(p4d) & _PAGE_PRESENT;
1153}
1154
1155static inline pud_t *p4d_pgtable(p4d_t p4d)
1156{
1157	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
1158}
1159
1160/*
1161 * Currently stuck as a macro due to indirect forward reference to
1162 * linux/mmzone.h's __section_mem_map_addr() definition:
1163 */
1164#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
1165
1166static inline int p4d_bad(p4d_t p4d)
1167{
1168	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
1169
1170	if (IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
1171		ignore_flags |= _PAGE_NX;
1172
1173	return (p4d_flags(p4d) & ~ignore_flags) != 0;
1174}
1175#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
1176
1177static inline unsigned long p4d_index(unsigned long address)
1178{
1179	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
1180}
1181
1182#if CONFIG_PGTABLE_LEVELS > 4
1183static inline int pgd_present(pgd_t pgd)
1184{
1185	if (!pgtable_l5_enabled())
1186		return 1;
1187	return pgd_flags(pgd) & _PAGE_PRESENT;
1188}
1189
1190static inline unsigned long pgd_page_vaddr(pgd_t pgd)
1191{
1192	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
1193}
1194
1195/*
1196 * Currently stuck as a macro due to indirect forward reference to
1197 * linux/mmzone.h's __section_mem_map_addr() definition:
1198 */
1199#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))
1200
1201/* to find an entry in a page-table-directory. */
1202static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
1203{
1204	if (!pgtable_l5_enabled())
1205		return (p4d_t *)pgd;
1206	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
1207}
1208
1209static inline int pgd_bad(pgd_t pgd)
1210{
1211	unsigned long ignore_flags = _PAGE_USER;
1212
1213	if (!pgtable_l5_enabled())
1214		return 0;
1215
1216	if (IS_ENABLED(CONFIG_MITIGATION_PAGE_TABLE_ISOLATION))
1217		ignore_flags |= _PAGE_NX;
1218
1219	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
1220}
1221
1222static inline int pgd_none(pgd_t pgd)
1223{
1224	if (!pgtable_l5_enabled())
1225		return 0;
1226	/*
1227	 * There is no need to do a workaround for the KNL stray
1228	 * A/D bit erratum here.  PGDs only point to page tables
1229	 * except on 32-bit non-PAE which is not supported on
1230	 * KNL.
1231	 */
1232	return !native_pgd_val(pgd);
1233}
1234#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
1235
1236#endif	/* __ASSEMBLY__ */
1237
1238#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
1239#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
1240
1241#ifndef __ASSEMBLY__
1242
1243extern int direct_gbpages;
1244void init_mem_mapping(void);
1245void early_alloc_pgt_buf(void);
1246void __init poking_init(void);
1247unsigned long init_memory_mapping(unsigned long start,
1248				  unsigned long end, pgprot_t prot);
1249
1250#ifdef CONFIG_X86_64
1251extern pgd_t trampoline_pgd_entry;
1252#endif
1253
1254/* local pte updates need not use xchg for locking */
1255static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
1256{
1257	pte_t res = *ptep;
1258
1259	/* Pure native function needs no input for mm, addr */
1260	native_pte_clear(NULL, 0, ptep);
1261	return res;
1262}
1263
1264static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
1265{
1266	pmd_t res = *pmdp;
1267
1268	native_pmd_clear(pmdp);
1269	return res;
1270}
1271
1272static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
1273{
1274	pud_t res = *pudp;
1275
1276	native_pud_clear(pudp);
1277	return res;
1278}
1279
1280static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1281			      pmd_t *pmdp, pmd_t pmd)
1282{
1283	page_table_check_pmd_set(mm, pmdp, pmd);
1284	set_pmd(pmdp, pmd);
1285}
1286
1287static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
1288			      pud_t *pudp, pud_t pud)
1289{
1290	page_table_check_pud_set(mm, pudp, pud);
1291	native_set_pud(pudp, pud);
1292}
1293
1294/*
1295 * We only update the dirty/accessed state if we set
1296 * the dirty bit by hand in the kernel, since the hardware
1297 * will do the accessed bit for us, and we don't want to
1299 * race with other CPUs that might be updating the dirty
1299 * bit at the same time.
1300 */
1301struct vm_area_struct;
1302
1303#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1304extern int ptep_set_access_flags(struct vm_area_struct *vma,
1305				 unsigned long address, pte_t *ptep,
1306				 pte_t entry, int dirty);
1307
1308#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1309extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
1310				     unsigned long addr, pte_t *ptep);
1311
1312#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1313extern int ptep_clear_flush_young(struct vm_area_struct *vma,
1314				  unsigned long address, pte_t *ptep);
1315
1316#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1317static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1318				       pte_t *ptep)
1319{
1320	pte_t pte = native_ptep_get_and_clear(ptep);
1321	page_table_check_pte_clear(mm, pte);
1322	return pte;
1323}
1324
1325#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1326static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1327					    unsigned long addr, pte_t *ptep,
1328					    int full)
1329{
1330	pte_t pte;
1331	if (full) {
1332		/*
1333		 * Full address destruction in progress; paravirt does not
1334		 * care about updates and native needs no locking
1335		 */
1336		pte = native_local_ptep_get_and_clear(ptep);
1337		page_table_check_pte_clear(mm, pte);
1338	} else {
1339		pte = ptep_get_and_clear(mm, addr, ptep);
1340	}
1341	return pte;
1342}
1343
1344#define __HAVE_ARCH_PTEP_SET_WRPROTECT
1345static inline void ptep_set_wrprotect(struct mm_struct *mm,
1346				      unsigned long addr, pte_t *ptep)
1347{
1348	/*
1349	 * Avoid accidentally creating shadow stack PTEs
1350	 * (Write=0,Dirty=1).  Use cmpxchg() to prevent races with
1351	 * the hardware setting Dirty=1.
1352	 */
1353	pte_t old_pte, new_pte;
1354
1355	old_pte = READ_ONCE(*ptep);
1356	do {
1357		new_pte = pte_wrprotect(old_pte);
1358	} while (!try_cmpxchg((long *)&ptep->pte, (long *)&old_pte, *(long *)&new_pte));
1359}
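/*
 * The loop above is the usual lock-free retry idiom: on failure,
 * try_cmpxchg() refreshes old_pte from *ptep, the wrprotected value is
 * recomputed, and the exchange is retried.  The same pattern on a plain
 * word (illustrative only):
 *
 *	long old = READ_ONCE(*p), new;
 *	do {
 *		new = old & ~_PAGE_RW;
 *	} while (!try_cmpxchg(p, &old, new));
 */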
1360
1361#define flush_tlb_fix_spurious_fault(vma, address, ptep) do { } while (0)
1362
1363#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
1364
1365#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1366extern int pmdp_set_access_flags(struct vm_area_struct *vma,
1367				 unsigned long address, pmd_t *pmdp,
1368				 pmd_t entry, int dirty);
1369extern int pudp_set_access_flags(struct vm_area_struct *vma,
1370				 unsigned long address, pud_t *pudp,
1371				 pud_t entry, int dirty);
1372
1373#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1374extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1375				     unsigned long addr, pmd_t *pmdp);
1376extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
1377				     unsigned long addr, pud_t *pudp);
1378
1379#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1380extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
1381				  unsigned long address, pmd_t *pmdp);
1382
1383
1384#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1385static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
1386				       pmd_t *pmdp)
1387{
1388	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
1389
1390	page_table_check_pmd_clear(mm, pmd);
1391
1392	return pmd;
1393}
1394
1395#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
1396static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
1397					unsigned long addr, pud_t *pudp)
1398{
1399	pud_t pud = native_pudp_get_and_clear(pudp);
1400
1401	page_table_check_pud_clear(mm, pud);
1402
1403	return pud;
1404}
1405
1406#define __HAVE_ARCH_PMDP_SET_WRPROTECT
1407static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1408				      unsigned long addr, pmd_t *pmdp)
1409{
1410	/*
1411	 * Avoid accidentally creating shadow stack PTEs
1412	 * (Write=0,Dirty=1).  Use cmpxchg() to prevent races with
1413	 * the hardware setting Dirty=1.
1414	 */
1415	pmd_t old_pmd, new_pmd;
1416
1417	old_pmd = READ_ONCE(*pmdp);
1418	do {
1419		new_pmd = pmd_wrprotect(old_pmd);
1420	} while (!try_cmpxchg((long *)pmdp, (long *)&old_pmd, *(long *)&new_pmd));
1421}
1422
1423#ifndef pmdp_establish
1424#define pmdp_establish pmdp_establish
1425static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1426		unsigned long address, pmd_t *pmdp, pmd_t pmd)
1427{
1428	page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
1429	if (IS_ENABLED(CONFIG_SMP)) {
1430		return xchg(pmdp, pmd);
1431	} else {
1432		pmd_t old = *pmdp;
1433		WRITE_ONCE(*pmdp, pmd);
1434		return old;
1435	}
1436}
1437#endif
1438
1439#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
1440static inline pud_t pudp_establish(struct vm_area_struct *vma,
1441		unsigned long address, pud_t *pudp, pud_t pud)
1442{
1443	page_table_check_pud_set(vma->vm_mm, pudp, pud);
1444	if (IS_ENABLED(CONFIG_SMP)) {
1445		return xchg(pudp, pud);
1446	} else {
1447		pud_t old = *pudp;
1448		WRITE_ONCE(*pudp, pud);
1449		return old;
1450	}
1451}
1452#endif
1453
1454#define __HAVE_ARCH_PMDP_INVALIDATE_AD
1455extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
1456				unsigned long address, pmd_t *pmdp);
1457
1458pud_t pudp_invalidate(struct vm_area_struct *vma, unsigned long address,
1459		      pud_t *pudp);
1460
1461/*
1462 * Page table pages are page-aligned.  The lower half of the top
1463 * level is used for userspace and the top half for the kernel.
1464 *
1465 * Returns true for parts of the PGD that map userspace and
1466 * false for the parts that map the kernel.
1467 */
1468static inline bool pgdp_maps_userspace(void *__ptr)
1469{
1470	unsigned long ptr = (unsigned long)__ptr;
1471
1472	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
1473}
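/*
 * Example (illustrative): a 4K PGD page holds 512 8-byte entries; on
 * configurations where PGD_KERNEL_START is 256, slots 0-255 map
 * userspace and pgdp_maps_userspace() returns true only for those.
 */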
1474
1475#define pgd_leaf	pgd_leaf
1476static inline bool pgd_leaf(pgd_t pgd) { return false; }
1477
1478#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
1479/*
1480 * All top-level MITIGATION_PAGE_TABLE_ISOLATION page tables are order-1 pages
1481 * (8k-aligned and 8k in size).  The kernel one is at the beginning 4k and
1482 * the user one is in the last 4k.  To switch between them, you
1483 * just need to flip the 12th bit in their addresses.
1484 */
1485#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
1486
1487/*
1488 * This generates better code than the inline assembly in
1489 * __set_bit().
1490 */
1491static inline void *ptr_set_bit(void *ptr, int bit)
1492{
1493	unsigned long __ptr = (unsigned long)ptr;
1494
1495	__ptr |= BIT(bit);
1496	return (void *)__ptr;
1497}
1498static inline void *ptr_clear_bit(void *ptr, int bit)
1499{
1500	unsigned long __ptr = (unsigned long)ptr;
1501
1502	__ptr &= ~BIT(bit);
1503	return (void *)__ptr;
1504}
1505
1506static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
1507{
1508	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1509}
1510
1511static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
1512{
1513	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1514}
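/*
 * Example (illustrative): for an 8k-aligned PGD allocation at ...e000,
 * the kernel copy lives at ...e000 and the user copy at ...f000;
 * kernel_to_user_pgdp() simply sets bit PAGE_SHIFT (bit 12).
 */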
1515
1516static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
1517{
1518	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1519}
1520
1521static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
1522{
1523	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1524}
1525#endif /* CONFIG_MITIGATION_PAGE_TABLE_ISOLATION */
1526
1527/*
1528 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
1529 *
1530 *  dst - pointer to pgd range anywhere on a pgd page
1531 *  src - ""
1532 *  count - the number of pgds to copy.
1533 *
1534 * dst and src can be on the same page, but the range must not overlap,
1535 * and must not cross a page boundary.
1536 */
1537static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
1538{
1539	memcpy(dst, src, count * sizeof(pgd_t));
1540#ifdef CONFIG_MITIGATION_PAGE_TABLE_ISOLATION
1541	if (!static_cpu_has(X86_FEATURE_PTI))
1542		return;
1543	/* Clone the user space pgd as well */
1544	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
1545	       count * sizeof(pgd_t));
1546#endif
1547}
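/*
 * Usage sketch (mirroring pgd_ctor() in arch/x86/mm/pgtable.c): copy the
 * kernel half of the reference page tables into a freshly allocated PGD:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */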
1548
1549#define PTE_SHIFT ilog2(PTRS_PER_PTE)
1550static inline int page_level_shift(enum pg_level level)
1551{
1552	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
1553}
1554static inline unsigned long page_level_size(enum pg_level level)
1555{
1556	return 1UL << page_level_shift(level);
1557}
1558static inline unsigned long page_level_mask(enum pg_level level)
1559{
1560	return ~(page_level_size(level) - 1);
1561}
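/*
 * Worked example (x86-64, PAGE_SHIFT == 12, PTE_SHIFT == 9):
 * page_level_shift() gives 12/21/30 for PG_LEVEL_4K/2M/1G, so
 * page_level_size(PG_LEVEL_2M) == 2M and page_level_mask(PG_LEVEL_2M)
 * clears the low 21 bits of an address.
 */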
1562
1563/*
1564 * The x86 doesn't have any external MMU info: the kernel page
1565 * tables contain all the necessary information.
1566 */
1567static inline void update_mmu_cache(struct vm_area_struct *vma,
1568		unsigned long addr, pte_t *ptep)
1569{
1570}
1571static inline void update_mmu_cache_range(struct vm_fault *vmf,
1572		struct vm_area_struct *vma, unsigned long addr,
1573		pte_t *ptep, unsigned int nr)
1574{
1575}
1576static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
1577		unsigned long addr, pmd_t *pmd)
1578{
1579}
1580static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
1581		unsigned long addr, pud_t *pud)
1582{
1583}
1584static inline pte_t pte_swp_mkexclusive(pte_t pte)
1585{
1586	return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
1587}
1588
1589static inline int pte_swp_exclusive(pte_t pte)
1590{
1591	return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
1592}
1593
1594static inline pte_t pte_swp_clear_exclusive(pte_t pte)
1595{
1596	return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
1597}
1598
1599#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1600static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1601{
1602	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1603}
1604
1605static inline int pte_swp_soft_dirty(pte_t pte)
1606{
1607	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
1608}
1609
1610static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1611{
1612	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1613}
1614
1615#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1616static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1617{
1618	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1619}
1620
1621static inline int pmd_swp_soft_dirty(pmd_t pmd)
1622{
1623	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
1624}
1625
1626static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1627{
1628	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1629}
1630#endif
1631#endif
1632
1633#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1634static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
1635{
1636	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
1637}
1638
1639static inline int pte_swp_uffd_wp(pte_t pte)
1640{
1641	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
1642}
1643
1644static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
1645{
1646	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
1647}
1648
1649static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
1650{
1651	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
1652}
1653
1654static inline int pmd_swp_uffd_wp(pmd_t pmd)
1655{
1656	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
1657}
1658
1659static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
1660{
1661	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
1662}
1663#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
1664
1665static inline u16 pte_flags_pkey(unsigned long pte_flags)
1666{
1667#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1668	/* ifdef to avoid doing 59-bit shift on 32-bit values */
1669	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
1670#else
1671	return 0;
1672#endif
1673}
1674
1675static inline bool __pkru_allows_pkey(u16 pkey, bool write)
1676{
1677	u32 pkru = read_pkru();
1678
1679	if (!__pkru_allows_read(pkru, pkey))
1680		return false;
1681	if (write && !__pkru_allows_write(pkru, pkey))
1682		return false;
1683
1684	return true;
1685}
1686
1687/*
1688 * 'pteval' can come from a PTE, PMD or PUD.  We only check
1689 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
1690 * same value on all 3 types.
1691 */
1692static inline bool __pte_access_permitted(unsigned long pteval, bool write)
1693{
1694	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
1695
1696	/*
1697	 * Write=0,Dirty=1 PTEs are shadow stack, which the kernel
1698	 * shouldn't generally allow access to, but since they
1699	 * are already Write=0, the below logic covers both cases.
1700	 */
1701	if (write)
1702		need_pte_bits |= _PAGE_RW;
1703
1704	if ((pteval & need_pte_bits) != need_pte_bits)
1705		return 0;
1706
1707	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
1708}
1709
1710#define pte_access_permitted pte_access_permitted
1711static inline bool pte_access_permitted(pte_t pte, bool write)
1712{
1713	return __pte_access_permitted(pte_val(pte), write);
1714}
1715
1716#define pmd_access_permitted pmd_access_permitted
1717static inline bool pmd_access_permitted(pmd_t pmd, bool write)
1718{
1719	return __pte_access_permitted(pmd_val(pmd), write);
1720}
1721
1722#define pud_access_permitted pud_access_permitted
1723static inline bool pud_access_permitted(pud_t pud, bool write)
1724{
1725	return __pte_access_permitted(pud_val(pud), write);
1726}
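/*
 * Example (illustrative): lockless GUP uses these helpers to decide
 * whether a mapping may be touched without taking mmap_lock, e.g.:
 *
 *	if (!pte_access_permitted(pte, flags & FOLL_WRITE))
 *		return 0;
 */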
1727
1728#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
1729extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
1730
1731static inline bool arch_has_pfn_modify_check(void)
1732{
1733	return boot_cpu_has_bug(X86_BUG_L1TF);
1734}
1735
1736#define arch_check_zapped_pte arch_check_zapped_pte
1737void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte);
1738
1739#define arch_check_zapped_pmd arch_check_zapped_pmd
1740void arch_check_zapped_pmd(struct vm_area_struct *vma, pmd_t pmd);
1741
1742#define arch_check_zapped_pud arch_check_zapped_pud
1743void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud);
1744
1745#ifdef CONFIG_XEN_PV
1746#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
1747static inline bool arch_has_hw_nonleaf_pmd_young(void)
1748{
1749	return !cpu_feature_enabled(X86_FEATURE_XENPV);
1750}
1751#endif
1752
1753#ifdef CONFIG_PAGE_TABLE_CHECK
1754static inline bool pte_user_accessible_page(pte_t pte)
1755{
1756	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
1757}
1758
1759static inline bool pmd_user_accessible_page(pmd_t pmd)
1760{
1761	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
1762}
1763
1764static inline bool pud_user_accessible_page(pud_t pud)
1765{
1766	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
1767}
1768#endif
1769
1770#ifdef CONFIG_X86_SGX
1771int arch_memory_failure(unsigned long pfn, int flags);
1772#define arch_memory_failure arch_memory_failure
1773
1774bool arch_is_platform_page(u64 paddr);
1775#define arch_is_platform_page arch_is_platform_page
1776#endif
1777
1778/*
1779 * Use set_p*_safe(), and elide TLB flushing, when confident that *no*
1780 * TLB flush will be required as a result of the "set". For example, use
1781 * in scenarios where it is known ahead of time that the routine is
1782 * setting non-present entries, or re-setting an existing entry to the
1783 * same value. Otherwise, use the typical "set" helpers and flush the
1784 * TLB.
1785 */
1786#define set_pte_safe(ptep, pte) \
1787({ \
1788	WARN_ON_ONCE(pte_present(*ptep) && !pte_same(*ptep, pte)); \
1789	set_pte(ptep, pte); \
1790})
1791
1792#define set_pmd_safe(pmdp, pmd) \
1793({ \
1794	WARN_ON_ONCE(pmd_present(*pmdp) && !pmd_same(*pmdp, pmd)); \
1795	set_pmd(pmdp, pmd); \
1796})
1797
1798#define set_pud_safe(pudp, pud) \
1799({ \
1800	WARN_ON_ONCE(pud_present(*pudp) && !pud_same(*pudp, pud)); \
1801	set_pud(pudp, pud); \
1802})
1803
1804#define set_p4d_safe(p4dp, p4d) \
1805({ \
1806	WARN_ON_ONCE(p4d_present(*p4dp) && !p4d_same(*p4dp, p4d)); \
1807	set_p4d(p4dp, p4d); \
1808})
1809
1810#define set_pgd_safe(pgdp, pgd) \
1811({ \
1812	WARN_ON_ONCE(pgd_present(*pgdp) && !pgd_same(*pgdp, pgd)); \
1813	set_pgd(pgdp, pgd); \
1814})
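/*
 * Usage sketch (assuming the entry is known not to be live, e.g. while
 * building the direct map during early boot):
 *
 *	set_pmd_safe(pmd, pfn_pmd(pfn, prot));	// no TLB flush required
 */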
1815#endif	/* __ASSEMBLY__ */
1816
1817#endif /* _ASM_X86_PGTABLE_H */