// SPDX-License-Identifier: GPL-2.0-only
/*
 * This kernel test validates architecture page table helpers and
 * accessors and helps in verifying their continued compliance with
 * expected generic MM semantics.
 *
 * Copyright (C) 2019 ARM Ltd.
 *
 * Author: Anshuman Khandual <anshuman.khandual@arm.com>
 */
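
/*
 * Note: this file is built only when CONFIG_DEBUG_VM_PGTABLE is enabled.
 * debug_vm_pgtable() below runs once at late_initcall time; failures are
 * reported through WARN_ON() splats in dmesg rather than via a test
 * framework.
 */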
#define pr_fmt(fmt) "debug_vm_pgtable: [%-25s]: " fmt, __func__

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kernel.h>
#include <linux/kconfig.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/pfn_t.h>
#include <linux/printk.h>
#include <linux/pgtable.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/start_kernel.h>
#include <linux/sched/mm.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * Please refer to Documentation/mm/arch_pgtable_helpers.rst for the semantics
 * expectations that are being validated here. All future changes in here
 * or the documentation need to be in sync.
 *
 * On the s390 platform, the lower 4 bits are used to identify the page table
 * entry type. But these bits might affect the ability to clear entries with
 * pxx_clear() because of how dynamic page table folding works on s390. So
 * while loading up the entries do not change the lower 4 bits. This does not
 * affect any other platform. Also avoid the 62nd bit on ppc64, which is
 * used to mark a pte entry.
 */
#define S390_SKIP_MASK		GENMASK(3, 0)
#if __BITS_PER_LONG == 64
#define PPC64_SKIP_MASK		GENMASK(62, 62)
#else
#define PPC64_SKIP_MASK		0x0
#endif
#define ARCH_SKIP_MASK (S390_SKIP_MASK | PPC64_SKIP_MASK)
#define RANDOM_ORVALUE (GENMASK(BITS_PER_LONG - 1, 0) & ~ARCH_SKIP_MASK)
#define RANDOM_NZVALUE	GENMASK(7, 0)

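/*
 * Shared test context. The pxdp pointers track the entries under test for
 * args->vaddr, while the start_* copies are saved so the allocated page
 * tables can be freed again in destroy_args(). A pfn of ULONG_MAX means the
 * backing (huge) page could not be allocated and the dependent tests are
 * skipped. The fixed_* pfns come from a known-valid physical range found by
 * init_fixed_pfns(); that memory is only used to build entries and is never
 * written.
 */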
struct pgtable_debug_args {
	struct mm_struct	*mm;
	struct vm_area_struct	*vma;

	pgd_t			*pgdp;
	p4d_t			*p4dp;
	pud_t			*pudp;
	pmd_t			*pmdp;
	pte_t			*ptep;

	p4d_t			*start_p4dp;
	pud_t			*start_pudp;
	pmd_t			*start_pmdp;
	pgtable_t		start_ptep;

	unsigned long		vaddr;
	pgprot_t		page_prot;
	pgprot_t		page_prot_none;

	bool			is_contiguous_page;
	unsigned long		pud_pfn;
	unsigned long		pmd_pfn;
	unsigned long		pte_pfn;

	unsigned long		fixed_alignment;
	unsigned long		fixed_pgd_pfn;
	unsigned long		fixed_p4d_pfn;
	unsigned long		fixed_pud_pfn;
	unsigned long		fixed_pmd_pfn;
	unsigned long		fixed_pte_pfn;
};

static void __init pte_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	pte_t pte = pfn_pte(args->fixed_pte_pfn, prot);
	unsigned long val = idx, *ptr = &val;

	pr_debug("Validating PTE basic (%pGv)\n", ptr);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pte() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pte_dirty(pte_wrprotect(pte)));

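	/*
	 * Round trips: each setter (pte_mk* / pte_wrprotect) must be visible
	 * through the matching query helper, and the inverse setter must
	 * clear it again. The final two checks assert that write protecting
	 * an entry never changes its dirty state.
	 */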
	WARN_ON(!pte_same(pte, pte));
	WARN_ON(!pte_young(pte_mkyoung(pte_mkold(pte))));
	WARN_ON(!pte_dirty(pte_mkdirty(pte_mkclean(pte))));
	WARN_ON(!pte_write(pte_mkwrite(pte_wrprotect(pte), args->vma)));
	WARN_ON(pte_young(pte_mkold(pte_mkyoung(pte))));
	WARN_ON(pte_dirty(pte_mkclean(pte_mkdirty(pte))));
	WARN_ON(pte_write(pte_wrprotect(pte_mkwrite(pte, args->vma))));
	WARN_ON(pte_dirty(pte_wrprotect(pte_mkclean(pte))));
	WARN_ON(!pte_dirty(pte_wrprotect(pte_mkdirty(pte))));
}

static void __init pte_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	/*
	 * Architectures optimize set_pte_at() by avoiding TLB flushes.
	 * This requires that set_pte_at() is not used to update an
	 * existing pte entry, so clear the pte before each set_pte_at().
	 *
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating PTE advanced\n");
	if (WARN_ON(!args->ptep))
		return;

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_write(pte));
	ptep_get_and_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_wrprotect(pte);
	pte = pte_mkclean(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	pte = pte_mkwrite(pte, args->vma);
	pte = pte_mkdirty(pte);
	ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!(pte_write(pte) && pte_dirty(pte)));
	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));

	pte = pfn_pte(args->pte_pfn, args->page_prot);
	pte = pte_mkyoung(pte);
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(pte_young(pte));

	ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
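/*
 * The PMD/PUD tests below are only meaningful with transparent hugepage
 * support; without CONFIG_TRANSPARENT_HUGEPAGE the empty stubs at the end
 * of this block keep the callers in debug_vm_pgtable() unconditional.
 */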
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD basic (%pGv)\n", ptr);
	pmd = pfn_pmd(args->fixed_pmd_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pmd() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd)));

	WARN_ON(!pmd_same(pmd, pmd));
	WARN_ON(!pmd_young(pmd_mkyoung(pmd_mkold(pmd))));
	WARN_ON(!pmd_dirty(pmd_mkdirty(pmd_mkclean(pmd))));
	WARN_ON(!pmd_write(pmd_mkwrite(pmd_wrprotect(pmd), args->vma)));
	WARN_ON(pmd_young(pmd_mkold(pmd_mkyoung(pmd))));
	WARN_ON(pmd_dirty(pmd_mkclean(pmd_mkdirty(pmd))));
	WARN_ON(pmd_write(pmd_wrprotect(pmd_mkwrite(pmd, args->vma))));
	WARN_ON(pmd_dirty(pmd_wrprotect(pmd_mkclean(pmd))));
	WARN_ON(!pmd_dirty(pmd_wrprotect(pmd_mkdirty(pmd))));
	/*
	 * A huge page does not point to the next level of page table.
	 * Hence this must qualify as pmd_bad().
	 */
	WARN_ON(!pmd_bad(pmd_mkhuge(pmd)));
}

static void __init pmd_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pmd_t pmd;
	unsigned long vaddr = args->vaddr;

	if (!has_transparent_hugepage())
		return;

	page = (args->pmd_pfn != ULONG_MAX) ? pfn_to_page(args->pmd_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pmd_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PMD advanced\n");
	/* Align the address wrt HPAGE_PMD_SIZE */
	vaddr &= HPAGE_PMD_MASK;

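	/*
	 * The THP code expects a preallocated pte page table to be deposited
	 * for every PMD THP mapping so that one is available if the huge
	 * entry later has to be split; withdraw it again before leaving.
	 */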
	pgtable_trans_huge_deposit(args->mm, args->pmdp, args->start_ptep);

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_set_wrprotect(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_write(pmd));
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pfn_pmd(args->pmd_pfn, args->page_prot);
	pmd = pmd_wrprotect(pmd);
	pmd = pmd_mkclean(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmd = pmd_mkwrite(pmd, args->vma);
	pmd = pmd_mkdirty(pmd);
	pmdp_set_access_flags(args->vma, vaddr, args->pmdp, pmd, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!(pmd_write(pmd) && pmd_dirty(pmd)));
	pmdp_huge_get_and_clear_full(args->vma, vaddr, args->pmdp, 1);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));

	pmd = pmd_mkhuge(pfn_pmd(args->pmd_pfn, args->page_prot));
	pmd = pmd_mkyoung(pmd);
	set_pmd_at(args->mm, vaddr, args->pmdp, pmd);
	flush_dcache_page(page);
	pmdp_test_and_clear_young(args->vma, vaddr, args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_young(pmd));

	/* Clear the pmd entry and withdraw the deposited page table */
	pmdp_huge_get_and_clear(args->mm, vaddr, args->pmdp);
	pgtable_trans_huge_withdraw(args->mm, args->pmdp);
}

static void __init pmd_leaf_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD leaf\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);

	/*
	 * PMD based THP is a leaf entry.
	 */
	pmd = pmd_mkhuge(pmd);
	WARN_ON(!pmd_leaf(pmd));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx)
{
	pgprot_t prot = vm_get_page_prot(idx);
	unsigned long val = idx, *ptr = &val;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD basic (%pGv)\n", ptr);
	pud = pfn_pud(args->fixed_pud_pfn, prot);

	/*
	 * This test needs to be executed after the given page table entry
	 * is created with pfn_pud() to make sure that vm_get_page_prot(idx)
	 * does not have the dirty bit enabled from the beginning. This is
	 * important for platforms like arm64 where (!PTE_RDONLY) indicates
	 * the dirty bit being set.
	 */
	WARN_ON(pud_dirty(pud_wrprotect(pud)));

	WARN_ON(!pud_same(pud, pud));
	WARN_ON(!pud_young(pud_mkyoung(pud_mkold(pud))));
	WARN_ON(!pud_dirty(pud_mkdirty(pud_mkclean(pud))));
	WARN_ON(pud_dirty(pud_mkclean(pud_mkdirty(pud))));
	WARN_ON(!pud_write(pud_mkwrite(pud_wrprotect(pud))));
	WARN_ON(pud_write(pud_wrprotect(pud_mkwrite(pud))));
	WARN_ON(pud_young(pud_mkold(pud_mkyoung(pud))));
	WARN_ON(pud_dirty(pud_wrprotect(pud_mkclean(pud))));
	WARN_ON(!pud_dirty(pud_wrprotect(pud_mkdirty(pud))));

	if (mm_pmd_folded(args->mm))
		return;

	/*
	 * A huge page does not point to the next level of page table.
	 * Hence this must qualify as pud_bad().
	 */
	WARN_ON(!pud_bad(pud_mkhuge(pud)));
}

static void __init pud_advanced_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	unsigned long vaddr = args->vaddr;
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	page = (args->pud_pfn != ULONG_MAX) ? pfn_to_page(args->pud_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pud_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PUD advanced\n");
	/* Align the address wrt HPAGE_PUD_SIZE */
	vaddr &= HPAGE_PUD_MASK;

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	/*
	 * Some architectures have debug checks to make sure
	 * huge pud mappings are only found with devmap entries.
	 * For now, test with devmap entries only.
	 */
	pud = pud_mkdevmap(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_set_wrprotect(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_write(pud));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */
	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_wrprotect(pud);
	pud = pud_mkclean(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pud = pud_mkwrite(pud);
	pud = pud_mkdirty(pud);
	pudp_set_access_flags(args->vma, vaddr, args->pudp, pud, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!(pud_write(pud) && pud_dirty(pud)));

#ifndef __PAGETABLE_PMD_FOLDED
	pudp_huge_get_and_clear_full(args->vma, vaddr, args->pudp, 1);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
#endif /* __PAGETABLE_PMD_FOLDED */

	pud = pfn_pud(args->pud_pfn, args->page_prot);
	pud = pud_mkdevmap(pud);
	pud = pud_mkyoung(pud);
	set_pud_at(args->mm, vaddr, args->pudp, pud);
	flush_dcache_page(page);
	pudp_test_and_clear_young(args->vma, vaddr, args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_young(pud));

	pudp_huge_get_and_clear(args->mm, vaddr, args->pudp);
}

static void __init pud_leaf_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD leaf\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	/*
	 * PUD based THP is a leaf entry.
	 */
	pud = pud_mkhuge(pud);
	WARN_ON(!pud_leaf(pud));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pud_basic_tests(struct pgtable_debug_args *args, int idx) { }
static void __init pmd_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pud_advanced_tests(struct pgtable_debug_args *args) { }
static void __init pmd_leaf_tests(struct pgtable_debug_args *args) { }
static void __init pud_leaf_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
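/*
 * pxd_set_huge()/pxd_clear_huge() back huge vmap/ioremap mappings. These
 * tests only run when init_fixed_pfns() found a physical range with at
 * least PMD_SIZE/PUD_SIZE alignment, since a huge mapping needs a
 * naturally aligned physical address.
 */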
static void __init pmd_huge_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!arch_vmap_pmd_supported(args->page_prot) ||
	    args->fixed_alignment < PMD_SIZE)
		return;

	pr_debug("Validating PMD huge\n");
	/*
	 * X86 defined pmd_set_huge() verifies that the given
	 * PMD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pmdp, __pmd(0));
	WARN_ON(!pmd_set_huge(args->pmdp, __pfn_to_phys(args->fixed_pmd_pfn), args->page_prot));
	WARN_ON(!pmd_clear_huge(args->pmdp));
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pud_huge_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!arch_vmap_pud_supported(args->page_prot) ||
	    args->fixed_alignment < PUD_SIZE)
		return;

	pr_debug("Validating PUD huge\n");
	/*
	 * X86 defined pud_set_huge() verifies that the given
	 * PUD is not a populated non-leaf entry.
	 */
	WRITE_ONCE(*args->pudp, __pud(0));
	WARN_ON(!pud_set_huge(args->pudp, __pfn_to_phys(args->fixed_pud_pfn), args->page_prot));
	WARN_ON(!pud_clear_huge(args->pudp));
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}
#else  /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static void __init pmd_huge_tests(struct pgtable_debug_args *args) { }
static void __init pud_huge_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */

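/*
 * P4D/PGD entries are filled with a random non-zero byte pattern
 * (RANDOM_NZVALUE) rather than being built from a pfn and protection bits;
 * at these levels only the trivial pxd_same() reflexivity is checked.
 */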
static void __init p4d_basic_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	pr_debug("Validating P4D basic\n");
	memset(&p4d, RANDOM_NZVALUE, sizeof(p4d_t));
	WARN_ON(!p4d_same(p4d, p4d));
}

static void __init pgd_basic_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	pr_debug("Validating PGD basic\n");
	memset(&pgd, RANDOM_NZVALUE, sizeof(pgd_t));
	WARN_ON(!pgd_same(pgd, pgd));
}

#ifndef __PAGETABLE_PUD_FOLDED
static void __init pud_clear_tests(struct pgtable_debug_args *args)
{
	pud_t pud = READ_ONCE(*args->pudp);

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD clear\n");
	pud = __pud(pud_val(pud) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pudp, pud);
	pud_clear(args->pudp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(!pud_none(pud));
}

static void __init pud_populate_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (mm_pmd_folded(args->mm))
		return;

	pr_debug("Validating PUD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pud_bad().
	 */
	pud_populate(args->mm, args->pudp, args->start_pmdp);
	pud = READ_ONCE(*args->pudp);
	WARN_ON(pud_bad(pud));
}
#else  /* !__PAGETABLE_PUD_FOLDED */
static void __init pud_clear_tests(struct pgtable_debug_args *args) { }
static void __init pud_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_P4D_FOLDED
static void __init p4d_clear_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d = READ_ONCE(*args->p4dp);

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D clear\n");
	p4d = __p4d(p4d_val(p4d) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->p4dp, p4d);
	p4d_clear(args->p4dp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(!p4d_none(p4d));
}

static void __init p4d_populate_tests(struct pgtable_debug_args *args)
{
	p4d_t p4d;

	if (mm_pud_folded(args->mm))
		return;

	pr_debug("Validating P4D populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as p4d_bad().
	 */
	pud_clear(args->pudp);
	p4d_clear(args->p4dp);
	p4d_populate(args->mm, args->p4dp, args->start_pudp);
	p4d = READ_ONCE(*args->p4dp);
	WARN_ON(p4d_bad(p4d));
}

static void __init pgd_clear_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd = READ_ONCE(*(args->pgdp));

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD clear\n");
	pgd = __pgd(pgd_val(pgd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pgdp, pgd);
	pgd_clear(args->pgdp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(!pgd_none(pgd));
}

static void __init pgd_populate_tests(struct pgtable_debug_args *args)
{
	pgd_t pgd;

	if (mm_p4d_folded(args->mm))
		return;

	pr_debug("Validating PGD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pgd_bad().
	 */
	p4d_clear(args->p4dp);
	pgd_clear(args->pgdp);
	pgd_populate(args->mm, args->pgdp, args->start_p4dp);
	pgd = READ_ONCE(*args->pgdp);
	WARN_ON(pgd_bad(pgd));
}
#else  /* !__PAGETABLE_P4D_FOLDED */
static void __init p4d_clear_tests(struct pgtable_debug_args *args) { }
static void __init pgd_clear_tests(struct pgtable_debug_args *args) { }
static void __init p4d_populate_tests(struct pgtable_debug_args *args) { }
static void __init pgd_populate_tests(struct pgtable_debug_args *args) { }
#endif /* __PAGETABLE_P4D_FOLDED */

static void __init pte_clear_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte = pfn_pte(args->pte_pfn, args->page_prot);

	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	/*
	 * flush_dcache_page() is called after set_pte_at() to clear
	 * PG_arch_1 for the page on ARM64. The page flag isn't cleared
	 * when the page is released, and the page allocation check would
	 * fail when the page is allocated again. For architectures other
	 * than ARM64, the unexpected overhead of cache flushing is
	 * acceptable.
	 */
	pr_debug("Validating PTE clear\n");
	if (WARN_ON(!args->ptep))
		return;

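	/*
	 * Sprinkle random bits into the entry so that pte_clear() is shown
	 * to wipe the whole thing and not just the bits a normal pte uses.
	 * riscv is excluded here, presumably because the random bits can
	 * form an invalid pte encoding on that architecture.
	 */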
#ifndef CONFIG_RISCV
	pte = __pte(pte_val(pte) | RANDOM_ORVALUE);
#endif
	set_pte_at(args->mm, args->vaddr, args->ptep, pte);
	flush_dcache_page(page);
	barrier();
	ptep_clear(args->mm, args->vaddr, args->ptep);
	pte = ptep_get(args->ptep);
	WARN_ON(!pte_none(pte));
}

static void __init pmd_clear_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd = READ_ONCE(*args->pmdp);

	pr_debug("Validating PMD clear\n");
	pmd = __pmd(pmd_val(pmd) | RANDOM_ORVALUE);
	WRITE_ONCE(*args->pmdp, pmd);
	pmd_clear(args->pmdp);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(!pmd_none(pmd));
}

static void __init pmd_populate_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	pr_debug("Validating PMD populate\n");
	/*
	 * This entry points to the next level page table page.
	 * Hence this must not qualify as pmd_bad().
	 */
	pmd_populate(args->mm, args->pmdp, args->start_ptep);
	pmd = READ_ONCE(*args->pmdp);
	WARN_ON(pmd_bad(pmd));
}

static void __init pte_special_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL))
		return;

	pr_debug("Validating PTE special\n");
	WARN_ON(!pte_special(pte_mkspecial(pte)));
}

static void __init pte_protnone_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot_none);

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	pr_debug("Validating PTE protnone\n");
	WARN_ON(!pte_protnone(pte));
	WARN_ON(!pte_present(pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_protnone_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_NUMA_BALANCING))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD protnone\n");
	pmd = pmd_mkhuge(pfn_pmd(args->fixed_pmd_pfn, args->page_prot_none));
	WARN_ON(!pmd_protnone(pmd));
	WARN_ON(!pmd_present(pmd));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_protnone_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
static void __init pte_devmap_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	pr_debug("Validating PTE devmap\n");
	WARN_ON(!pte_devmap(pte_mkdevmap(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_devmap_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD devmap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_devmap(pmd_mkdevmap(pmd)));
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_devmap_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD devmap\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_devmap(pud_mkdevmap(pud)));
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#else  /* !CONFIG_ARCH_HAS_PTE_DEVMAP */
static void __init pte_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pmd_devmap_tests(struct pgtable_debug_args *args) { }
static void __init pud_devmap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */

static void __init pte_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE soft dirty\n");
	WARN_ON(!pte_soft_dirty(pte_mksoft_dirty(pte)));
	WARN_ON(pte_soft_dirty(pte_clear_soft_dirty(pte)));
}

static void __init pte_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pte_t pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	pr_debug("Validating PTE swap soft dirty\n");
	WARN_ON(!pte_swp_soft_dirty(pte_swp_mksoft_dirty(pte)));
	WARN_ON(pte_swp_soft_dirty(pte_swp_clear_soft_dirty(pte)));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_soft_dirty(pmd_mksoft_dirty(pmd)));
	WARN_ON(pmd_soft_dirty(pmd_clear_soft_dirty(pmd)));
}

static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) ||
		!IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION))
		return;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap soft dirty\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_swp_soft_dirty(pmd_swp_mksoft_dirty(pmd)));
	WARN_ON(pmd_swp_soft_dirty(pmd_swp_clear_soft_dirty(pmd)));
}
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_soft_dirty_tests(struct pgtable_debug_args *args) { }
static void __init pmd_swap_soft_dirty_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

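/*
 * The pte <-> swp entry conversions must round-trip with every architectural
 * swap bit in use, and the swap exclusive marker must toggle without
 * disturbing the embedded type/offset (checked via memcmp() below).
 */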
static void __init pte_swap_exclusive_tests(struct pgtable_debug_args *args)
{
	unsigned long max_swap_offset;
	swp_entry_t entry, entry2;
	pte_t pte;

	pr_debug("Validating PTE swap exclusive\n");

	/* See generic_max_swapfile_size(): probe the maximum offset */
	max_swap_offset = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0, ~0UL))));

	/* Create a swp entry with all possible bits set */
	entry = swp_entry((1 << MAX_SWAPFILES_SHIFT) - 1, max_swap_offset);

	pte = swp_entry_to_pte(entry);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_mkexclusive(pte);
	WARN_ON(!pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	WARN_ON(pte_swp_soft_dirty(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));

	pte = pte_swp_clear_exclusive(pte);
	WARN_ON(pte_swp_exclusive(pte));
	WARN_ON(!is_swap_pte(pte));
	entry2 = pte_to_swp_entry(pte);
	WARN_ON(memcmp(&entry, &entry2, sizeof(entry)));
}

static void __init pte_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pte_t pte;

	pr_debug("Validating PTE swap\n");
	pte = pfn_pte(args->fixed_pte_pfn, args->page_prot);
	swp = __pte_to_swp_entry(pte);
	pte = __swp_entry_to_pte(swp);
	WARN_ON(args->fixed_pte_pfn != pte_pfn(pte));
}

#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
static void __init pmd_swap_tests(struct pgtable_debug_args *args)
{
	swp_entry_t swp;
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD swap\n");
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	swp = __pmd_to_swp_entry(pmd);
	pmd = __swp_entry_to_pmd(swp);
	WARN_ON(args->fixed_pmd_pfn != pmd_pfn(pmd));
}
#else  /* !CONFIG_ARCH_ENABLE_THP_MIGRATION */
static void __init pmd_swap_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */

static void __init swap_migration_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	swp_entry_t swp;

	if (!IS_ENABLED(CONFIG_MIGRATION))
		return;

	/*
	 * swap_migration_tests() requires a dedicated page as it needs to
	 * be locked before creating a migration entry from it. Locking a
	 * page that actually maps kernel text ('start_kernel') can be
	 * really problematic. Let's use the allocated page explicitly for
	 * this purpose.
	 */
	page = (args->pte_pfn != ULONG_MAX) ? pfn_to_page(args->pte_pfn) : NULL;
	if (!page)
		return;

	pr_debug("Validating swap migration\n");

	/*
	 * make_[readable|writable]_migration_entry() expects the given page
	 * to be locked, otherwise it triggers a BUG_ON().
	 */
	__SetPageLocked(page);
	swp = make_writable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(!is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(swp_offset(swp));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));

	swp = make_readable_migration_entry(page_to_pfn(page));
	WARN_ON(!is_migration_entry(swp));
	WARN_ON(is_writable_migration_entry(swp));
	__ClearPageLocked(page);
}

#ifdef CONFIG_HUGETLB_PAGE
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args)
{
	struct page *page;
	pte_t pte;

	pr_debug("Validating HugeTLB basic\n");
	/*
	 * Accessing the page associated with the pfn is safe here, as it
	 * was previously derived from a valid physical range (or from the
	 * kernel image itself) in init_fixed_pfns() and is never written.
	 */
	page = pfn_to_page(args->fixed_pmd_pfn);
	pte = mk_huge_pte(page, args->page_prot);

	WARN_ON(!huge_pte_dirty(huge_pte_mkdirty(pte)));
	WARN_ON(!huge_pte_write(huge_pte_mkwrite(huge_pte_wrprotect(pte))));
	WARN_ON(huge_pte_write(huge_pte_wrprotect(huge_pte_mkwrite(pte))));

#ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
	pte = pfn_pte(args->fixed_pmd_pfn, args->page_prot);

	WARN_ON(!pte_huge(arch_make_huge_pte(pte, PMD_SHIFT, VM_ACCESS_FLAGS)));
#endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
}
#else  /* !CONFIG_HUGETLB_PAGE */
static void __init hugetlb_basic_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void __init pmd_thp_tests(struct pgtable_debug_args *args)
{
	pmd_t pmd;

	if (!has_transparent_hugepage())
		return;

	pr_debug("Validating PMD based THP\n");
	/*
	 * pmd_trans_huge() and pmd_present() must return true after
	 * MMU invalidation with pmd_mkinvalid(). This behavior is an
	 * optimization for transparent huge pages. pmd_trans_huge() must
	 * be true if pmd_page() returns a valid THP to avoid taking the
	 * pmd_lock when others walk over non-transhuge pmds (i.e. there
	 * are no THPs allocated). Especially when splitting a THP and
	 * removing the present bit from the pmd, pmd_trans_huge() still
	 * needs to return true. pmd_present() should be true whenever
	 * pmd_trans_huge() returns true.
	 */
	pmd = pfn_pmd(args->fixed_pmd_pfn, args->page_prot);
	WARN_ON(!pmd_trans_huge(pmd_mkhuge(pmd)));

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
	WARN_ON(!pmd_trans_huge(pmd_mkinvalid(pmd_mkhuge(pmd))));
	WARN_ON(!pmd_present(pmd_mkinvalid(pmd_mkhuge(pmd))));
#endif /* __HAVE_ARCH_PMDP_INVALIDATE */
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void __init pud_thp_tests(struct pgtable_debug_args *args)
{
	pud_t pud;

	if (!has_transparent_pud_hugepage())
		return;

	pr_debug("Validating PUD based THP\n");
	pud = pfn_pud(args->fixed_pud_pfn, args->page_prot);
	WARN_ON(!pud_trans_huge(pud_mkhuge(pud)));

	/*
	 * pud_mkinvalid() has been dropped for now. Re-enable these tests
	 * once it is brought back with a modified pud_present().
	 *
	 * WARN_ON(!pud_trans_huge(pud_mkinvalid(pud_mkhuge(pud))));
	 * WARN_ON(!pud_present(pud_mkinvalid(pud_mkhuge(pud))));
	 */
}
#else  /* !CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#else  /* !CONFIG_TRANSPARENT_HUGEPAGE */
static void __init pmd_thp_tests(struct pgtable_debug_args *args) { }
static void __init pud_thp_tests(struct pgtable_debug_args *args) { }
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

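/*
 * Pick a random address inside the user virtual address range. Nothing is
 * ever mapped there; the tests only need a plausible user address to feed
 * into the page table walk and the set_pxx_at() helpers.
 */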
static unsigned long __init get_random_vaddr(void)
{
	unsigned long random_vaddr, random_pages, total_user_pages;

	total_user_pages = (TASK_SIZE - FIRST_USER_ADDRESS) / PAGE_SIZE;

	random_pages = get_random_long() % total_user_pages;
	random_vaddr = FIRST_USER_ADDRESS + random_pages * PAGE_SIZE;

	return random_vaddr;
}

static void __init destroy_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;

	/* Free (huge) page */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage() &&
	    args->pud_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pud_pfn,
					  (1 << (HPAGE_PUD_SHIFT - PAGE_SHIFT)));
		} else {
			page = pfn_to_page(args->pud_pfn);
			__free_pages(page, HPAGE_PUD_SHIFT - PAGE_SHIFT);
		}

		args->pud_pfn = ULONG_MAX;
		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage() &&
	    args->pmd_pfn != ULONG_MAX) {
		if (args->is_contiguous_page) {
			free_contig_range(args->pmd_pfn, (1 << HPAGE_PMD_ORDER));
		} else {
			page = pfn_to_page(args->pmd_pfn);
			__free_pages(page, HPAGE_PMD_ORDER);
		}

		args->pmd_pfn = ULONG_MAX;
		args->pte_pfn = ULONG_MAX;
	}

	if (args->pte_pfn != ULONG_MAX) {
		page = pfn_to_page(args->pte_pfn);
		__free_page(page);

		args->pte_pfn = ULONG_MAX;
	}

	/* Free page table entries */
	if (args->start_ptep) {
		pte_free(args->mm, args->start_ptep);
		mm_dec_nr_ptes(args->mm);
	}

	if (args->start_pmdp) {
		pmd_free(args->mm, args->start_pmdp);
		mm_dec_nr_pmds(args->mm);
	}

	if (args->start_pudp) {
		pud_free(args->mm, args->start_pudp);
		mm_dec_nr_puds(args->mm);
	}

	if (args->start_p4dp)
		p4d_free(args->mm, args->start_p4dp);

	/* Free vma and mm struct */
	if (args->vma)
		vm_area_free(args->vma);

	if (args->mm)
		mmdrop(args->mm);
}

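/*
 * Allocate the backing page for the pmd/pud level tests. Orders above
 * MAX_PAGE_ORDER cannot come from the buddy allocator, so try
 * alloc_contig_pages() first where CONFIG_CONTIG_ALLOC is available and
 * remember (via is_contiguous_page) that the range must later be freed
 * with free_contig_range().
 */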
static struct page * __init
debug_vm_pgtable_alloc_huge_page(struct pgtable_debug_args *args, int order)
{
	struct page *page = NULL;

#ifdef CONFIG_CONTIG_ALLOC
	if (order > MAX_PAGE_ORDER) {
		page = alloc_contig_pages((1 << order), GFP_KERNEL,
					  first_online_node, NULL);
		if (page) {
			args->is_contiguous_page = true;
			return page;
		}
	}
#endif

	if (order <= MAX_PAGE_ORDER)
		page = alloc_pages(GFP_KERNEL, order);

	return page;
}

/*
 * Check if a physical memory range described by <pstart, pend> contains
 * an area that is of size psize, and aligned to psize.
 *
 * Don't use address 0, an all-zeroes physical address might mask bugs, and
 * it's not used on x86.
 */
static void __init phys_align_check(phys_addr_t pstart,
				    phys_addr_t pend, unsigned long psize,
				    phys_addr_t *physp, unsigned long *alignp)
{
	phys_addr_t aligned_start, aligned_end;

	if (pstart == 0)
		pstart = PAGE_SIZE;

	aligned_start = ALIGN(pstart, psize);
	aligned_end = aligned_start + psize;

	if (aligned_end > aligned_start && aligned_end <= pend) {
		*alignp = psize;
		*physp = aligned_start;
	}
}

static void __init init_fixed_pfns(struct pgtable_debug_args *args)
{
	u64 idx;
	phys_addr_t phys, pstart, pend;

	/*
	 * Initialize the fixed pfns. To do this, try to find a
	 * valid physical range, preferably aligned to PUD_SIZE,
	 * but settling for aligned to PMD_SIZE as a fallback. If
	 * neither of those is found, use the physical address of
	 * the start_kernel symbol.
	 *
	 * The memory doesn't need to be allocated, it just needs to exist
	 * as usable memory. It won't be touched.
	 *
	 * The alignment is recorded, and can be checked to see if we
	 * can run the tests that require an actual valid physical
	 * address range on some architectures ({pmd,pud}_huge_tests()
	 * on x86).
	 */

	phys = __pa_symbol(&start_kernel);
	args->fixed_alignment = PAGE_SIZE;

	for_each_mem_range(idx, &pstart, &pend) {
		/* First check for a PUD-aligned area */
		phys_align_check(pstart, pend, PUD_SIZE, &phys,
				 &args->fixed_alignment);

		/* If a PUD-aligned area is found, we're done */
		if (args->fixed_alignment == PUD_SIZE)
			break;

		/*
		 * If no PMD-aligned area was found yet, check for one,
		 * but continue the loop to look for a PUD-aligned area.
		 */
		if (args->fixed_alignment < PMD_SIZE)
			phys_align_check(pstart, pend, PMD_SIZE, &phys,
					 &args->fixed_alignment);
	}

	args->fixed_pgd_pfn = __phys_to_pfn(phys & PGDIR_MASK);
	args->fixed_p4d_pfn = __phys_to_pfn(phys & P4D_MASK);
	args->fixed_pud_pfn = __phys_to_pfn(phys & PUD_MASK);
	args->fixed_pmd_pfn = __phys_to_pfn(phys & PMD_MASK);
	args->fixed_pte_pfn = __phys_to_pfn(phys & PAGE_MASK);
	WARN_ON(!pfn_valid(args->fixed_pte_pfn));
}

static int __init init_args(struct pgtable_debug_args *args)
{
	struct page *page = NULL;
	int ret = 0;

	/*
	 * Initialize the debugging data.
	 *
	 * vm_get_page_prot(VM_NONE) or vm_get_page_prot(VM_SHARED|VM_NONE)
	 * will help create page table entries with PROT_NONE permission as
	 * required for pxx_protnone_tests().
	 */
	memset(args, 0, sizeof(*args));
	args->vaddr              = get_random_vaddr();
	args->page_prot          = vm_get_page_prot(VM_ACCESS_FLAGS);
	args->page_prot_none     = vm_get_page_prot(VM_NONE);
	args->is_contiguous_page = false;
	args->pud_pfn            = ULONG_MAX;
	args->pmd_pfn            = ULONG_MAX;
	args->pte_pfn            = ULONG_MAX;
	args->fixed_pgd_pfn      = ULONG_MAX;
	args->fixed_p4d_pfn      = ULONG_MAX;
	args->fixed_pud_pfn      = ULONG_MAX;
	args->fixed_pmd_pfn      = ULONG_MAX;
	args->fixed_pte_pfn      = ULONG_MAX;

	/* Allocate mm and vma */
	args->mm = mm_alloc();
	if (!args->mm) {
		pr_err("Failed to allocate mm struct\n");
		ret = -ENOMEM;
		goto error;
	}

	args->vma = vm_area_alloc(args->mm);
	if (!args->vma) {
		pr_err("Failed to allocate vma\n");
		ret = -ENOMEM;
		goto error;
	}

	/*
	 * Allocate page table entries. They will be modified in the tests.
	 * Let's save the page table entries so that they can be released
	 * when the tests are completed.
	 */
	args->pgdp = pgd_offset(args->mm, args->vaddr);
	args->p4dp = p4d_alloc(args->mm, args->pgdp, args->vaddr);
	if (!args->p4dp) {
		pr_err("Failed to allocate p4d entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_p4dp = p4d_offset(args->pgdp, 0UL);
	WARN_ON(!args->start_p4dp);

	args->pudp = pud_alloc(args->mm, args->p4dp, args->vaddr);
	if (!args->pudp) {
		pr_err("Failed to allocate pud entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pudp = pud_offset(args->p4dp, 0UL);
	WARN_ON(!args->start_pudp);

	args->pmdp = pmd_alloc(args->mm, args->pudp, args->vaddr);
	if (!args->pmdp) {
		pr_err("Failed to allocate pmd entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_pmdp = pmd_offset(args->pudp, 0UL);
	WARN_ON(!args->start_pmdp);

	if (pte_alloc(args->mm, args->pmdp)) {
		pr_err("Failed to allocate pte entries\n");
		ret = -ENOMEM;
		goto error;
	}
	args->start_ptep = pmd_pgtable(READ_ONCE(*args->pmdp));
	WARN_ON(!args->start_ptep);

	init_fixed_pfns(args);

	/*
	 * Allocate (huge) pages because some of the tests need to access
	 * the data in the pages. The corresponding tests will be skipped
	 * if we fail to allocate (huge) pages.
	 */
	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_pud_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args,
				HPAGE_PUD_SHIFT - PAGE_SHIFT);
		if (page) {
			args->pud_pfn = page_to_pfn(page);
			args->pmd_pfn = args->pud_pfn;
			args->pte_pfn = args->pud_pfn;
			return 0;
		}
	}

	if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    has_transparent_hugepage()) {
		page = debug_vm_pgtable_alloc_huge_page(args, HPAGE_PMD_ORDER);
		if (page) {
			args->pmd_pfn = page_to_pfn(page);
			args->pte_pfn = args->pmd_pfn;
			return 0;
		}
	}

	page = alloc_page(GFP_KERNEL);
	if (page)
		args->pte_pfn = page_to_pfn(page);

	return 0;

error:
	destroy_args(args);
	return ret;
}

static int __init debug_vm_pgtable(void)
{
	struct pgtable_debug_args args;
	spinlock_t *ptl = NULL;
	int idx, ret;

	pr_info("Validating architecture page table helpers\n");
	ret = init_args(&args);
	if (ret)
		return ret;

	/*
	 * Iterate over each possible vm_flags combination to make sure
	 * that all the basic page table transformation validations hold
	 * true irrespective of the starting protection value for a
	 * given page table entry.
	 *
	 * Protection based vm_flags combinations are always linear
	 * and increasing, i.e. starting from VM_NONE and going up to
	 * (VM_SHARED | READ | WRITE | EXEC).
	 */
#define VM_FLAGS_START	(VM_NONE)
#define VM_FLAGS_END	(VM_SHARED | VM_EXEC | VM_WRITE | VM_READ)

	for (idx = VM_FLAGS_START; idx <= VM_FLAGS_END; idx++) {
		pte_basic_tests(&args, idx);
		pmd_basic_tests(&args, idx);
		pud_basic_tests(&args, idx);
	}

	/*
	 * Both P4D and PGD level tests are very basic and do not
	 * involve creating page table entries from the protection
	 * value and the given pfn. Hence just keep them out of
	 * the above iteration for now to save some test execution
	 * time.
	 */
	p4d_basic_tests(&args);
	pgd_basic_tests(&args);

	pmd_leaf_tests(&args);
	pud_leaf_tests(&args);

	pte_special_tests(&args);
	pte_protnone_tests(&args);
	pmd_protnone_tests(&args);

	pte_devmap_tests(&args);
	pmd_devmap_tests(&args);
	pud_devmap_tests(&args);

	pte_soft_dirty_tests(&args);
	pmd_soft_dirty_tests(&args);
	pte_swap_soft_dirty_tests(&args);
	pmd_swap_soft_dirty_tests(&args);

	pte_swap_exclusive_tests(&args);

	pte_swap_tests(&args);
	pmd_swap_tests(&args);

	swap_migration_tests(&args);

	pmd_thp_tests(&args);
	pud_thp_tests(&args);

	hugetlb_basic_tests(&args);

	/*
	 * Page table modifying tests. They need to hold
	 * proper page table lock.
	 */

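	/*
	 * The PTE level uses the split ptl via pte_offset_map_lock(), which
	 * can fail if the pmd entry went away underneath us, hence the NULL
	 * check; the PMD and PUD levels take their own locks, and the upper
	 * levels fall back to mm->page_table_lock.
	 */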
	args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl);
	pte_clear_tests(&args);
	pte_advanced_tests(&args);
	if (args.ptep)
		pte_unmap_unlock(args.ptep, ptl);

	ptl = pmd_lock(args.mm, args.pmdp);
	pmd_clear_tests(&args);
	pmd_advanced_tests(&args);
	pmd_huge_tests(&args);
	pmd_populate_tests(&args);
	spin_unlock(ptl);

	ptl = pud_lock(args.mm, args.pudp);
	pud_clear_tests(&args);
	pud_advanced_tests(&args);
	pud_huge_tests(&args);
	pud_populate_tests(&args);
	spin_unlock(ptl);

	spin_lock(&(args.mm->page_table_lock));
	p4d_clear_tests(&args);
	pgd_clear_tests(&args);
	p4d_populate_tests(&args);
	pgd_populate_tests(&args);
	spin_unlock(&(args.mm->page_table_lock));

	destroy_args(&args);
	return 0;
}
late_initcall(debug_vm_pgtable);