arch/x86/include/asm/pgtable.h (Linux v4.10.11)
  1#ifndef _ASM_X86_PGTABLE_H
  2#define _ASM_X86_PGTABLE_H
  3
  4#include <asm/page.h>
  5#include <asm/e820.h>
  6
  7#include <asm/pgtable_types.h>
  8
  9/*
 10 * Macro to mark a page protection value as UC-
 11 */
 12#define pgprot_noncached(prot)						\
 13	((boot_cpu_data.x86 > 3)					\
 14	 ? (__pgprot(pgprot_val(prot) |					\
 15		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
 16	 : (prot))
 17
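/*
 * Editorial example, not part of the original header: a minimal sketch of
 * how a driver's mmap handler might apply pgprot_noncached() before handing
 * an MMIO range to remap_pfn_range().  The device_mmap() name and the pfn
 * value are hypothetical.
 */
static int device_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x1000;	/* hypothetical MMIO page frame */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start, vma->vm_page_prot);
}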
 18#ifndef __ASSEMBLY__
 19#include <asm/x86_init.h>
 20
 21void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd);
 22void ptdump_walk_pgd_level_checkwx(void);
 23
 24#ifdef CONFIG_DEBUG_WX
 25#define debug_checkwx() ptdump_walk_pgd_level_checkwx()
 26#else
 27#define debug_checkwx() do { } while (0)
 28#endif
 29
 30/*
 31 * ZERO_PAGE is a global shared page that is always zero: used
 32 * for zero-mapped memory areas etc..
 33 */
 34extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
 35	__visible;
 36#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
 37
 38extern spinlock_t pgd_lock;
 39extern struct list_head pgd_list;
 40
 41extern struct mm_struct *pgd_page_get_mm(struct page *page);
 42
 43#ifdef CONFIG_PARAVIRT
 44#include <asm/paravirt.h>
 45#else  /* !CONFIG_PARAVIRT */
 46#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
 47#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
 48#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)
 49
 50#define set_pte_atomic(ptep, pte)					\
 51	native_set_pte_atomic(ptep, pte)
 52
 53#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
 54
 55#ifndef __PAGETABLE_PUD_FOLDED
 56#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
 57#define pgd_clear(pgd)			native_pgd_clear(pgd)
 58#endif
 59
 60#ifndef set_pud
 61# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
 62#endif
 63
 64#ifndef __PAGETABLE_PMD_FOLDED
 65#define pud_clear(pud)			native_pud_clear(pud)
 66#endif
 67
 68#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
 69#define pmd_clear(pmd)			native_pmd_clear(pmd)
 70
 71#define pte_update(mm, addr, ptep)              do { } while (0)
 72
 73#define pgd_val(x)	native_pgd_val(x)
 74#define __pgd(x)	native_make_pgd(x)
 75
 76#ifndef __PAGETABLE_PUD_FOLDED
 77#define pud_val(x)	native_pud_val(x)
 78#define __pud(x)	native_make_pud(x)
 79#endif
 80
 81#ifndef __PAGETABLE_PMD_FOLDED
 82#define pmd_val(x)	native_pmd_val(x)
 83#define __pmd(x)	native_make_pmd(x)
 84#endif
 85
 86#define pte_val(x)	native_pte_val(x)
 87#define __pte(x)	native_make_pte(x)
 88
 89#define arch_end_context_switch(prev)	do {} while(0)
 90
 91#endif	/* CONFIG_PARAVIRT */
 92
 93/*
 94 * The following only work if pte_present() is true.
 95 * Undefined behaviour if not..
 96 */
 97static inline int pte_dirty(pte_t pte)
 98{
 99	return pte_flags(pte) & _PAGE_DIRTY;
100}
101
102
103static inline u32 read_pkru(void)
104{
105	if (boot_cpu_has(X86_FEATURE_OSPKE))
106		return __read_pkru();
107	return 0;
108}
109
110static inline void write_pkru(u32 pkru)
111{
112	if (boot_cpu_has(X86_FEATURE_OSPKE))
113		__write_pkru(pkru);
114}
115
116static inline int pte_young(pte_t pte)
117{
118	return pte_flags(pte) & _PAGE_ACCESSED;
119}
120
121static inline int pmd_dirty(pmd_t pmd)
122{
123	return pmd_flags(pmd) & _PAGE_DIRTY;
124}
125
126static inline int pmd_young(pmd_t pmd)
127{
128	return pmd_flags(pmd) & _PAGE_ACCESSED;
129}
130
131static inline int pte_write(pte_t pte)
132{
133	return pte_flags(pte) & _PAGE_RW;
134}
135
136static inline int pte_huge(pte_t pte)
137{
138	return pte_flags(pte) & _PAGE_PSE;
139}
140
141static inline int pte_global(pte_t pte)
142{
143	return pte_flags(pte) & _PAGE_GLOBAL;
144}
145
146static inline int pte_exec(pte_t pte)
147{
148	return !(pte_flags(pte) & _PAGE_NX);
149}
150
151static inline int pte_special(pte_t pte)
152{
153	return pte_flags(pte) & _PAGE_SPECIAL;
154}
155
156static inline unsigned long pte_pfn(pte_t pte)
157{
158	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
159}
160
161static inline unsigned long pmd_pfn(pmd_t pmd)
162{
163	return (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
164}
165
166static inline unsigned long pud_pfn(pud_t pud)
167{
168	return (pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT;
169}
170
171#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
172
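/*
 * Editorial example: pte_pfn() strips the flag bits and yields the page
 * frame number, so a physical address or the backing struct page fall out
 * directly.  pte_to_phys() is a hypothetical helper used only to illustrate.
 */
static inline phys_addr_t pte_to_phys(pte_t pte)
{
	return (phys_addr_t)pte_pfn(pte) << PAGE_SHIFT;
}
/* and pte_page(pte) is simply pfn_to_page(pte_pfn(pte)), as defined above */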
173static inline int pmd_large(pmd_t pte)
174{
175	return pmd_flags(pte) & _PAGE_PSE;
176}
177
178#ifdef CONFIG_TRANSPARENT_HUGEPAGE
179static inline int pmd_trans_huge(pmd_t pmd)
180{
181	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
182}
183
184#define has_transparent_hugepage has_transparent_hugepage
185static inline int has_transparent_hugepage(void)
186{
187	return boot_cpu_has(X86_FEATURE_PSE);
188}
189
190#ifdef __HAVE_ARCH_PTE_DEVMAP
191static inline int pmd_devmap(pmd_t pmd)
192{
193	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
194}
195#endif
196#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
197
198static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
199{
200	pteval_t v = native_pte_val(pte);
201
202	return native_make_pte(v | set);
203}
204
205static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
206{
207	pteval_t v = native_pte_val(pte);
208
209	return native_make_pte(v & ~clear);
210}
211
212static inline pte_t pte_mkclean(pte_t pte)
213{
214	return pte_clear_flags(pte, _PAGE_DIRTY);
215}
216
217static inline pte_t pte_mkold(pte_t pte)
218{
219	return pte_clear_flags(pte, _PAGE_ACCESSED);
220}
221
222static inline pte_t pte_wrprotect(pte_t pte)
223{
224	return pte_clear_flags(pte, _PAGE_RW);
225}
226
227static inline pte_t pte_mkexec(pte_t pte)
228{
229	return pte_clear_flags(pte, _PAGE_NX);
230}
231
232static inline pte_t pte_mkdirty(pte_t pte)
233{
234	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
235}
236
237static inline pte_t pte_mkyoung(pte_t pte)
238{
239	return pte_set_flags(pte, _PAGE_ACCESSED);
240}
241
242static inline pte_t pte_mkwrite(pte_t pte)
243{
244	return pte_set_flags(pte, _PAGE_RW);
245}
246
247static inline pte_t pte_mkhuge(pte_t pte)
248{
249	return pte_set_flags(pte, _PAGE_PSE);
250}
251
252static inline pte_t pte_clrhuge(pte_t pte)
253{
254	return pte_clear_flags(pte, _PAGE_PSE);
255}
256
257static inline pte_t pte_mkglobal(pte_t pte)
258{
259	return pte_set_flags(pte, _PAGE_GLOBAL);
260}
261
262static inline pte_t pte_clrglobal(pte_t pte)
263{
264	return pte_clear_flags(pte, _PAGE_GLOBAL);
265}
266
267static inline pte_t pte_mkspecial(pte_t pte)
268{
269	return pte_set_flags(pte, _PAGE_SPECIAL);
270}
271
272static inline pte_t pte_mkdevmap(pte_t pte)
273{
274	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
275}
276
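/*
 * Editorial note: all of the pte_mk*()/pte_clr*()/pte_wrprotect() helpers
 * above operate on the pte value and return a new one; they never write the
 * page table, so they can be chained freely and the result installed with a
 * single set_pte_at().  A hypothetical example:
 */
static inline pte_t pte_mk_clean_readonly(pte_t pte)
{
	return pte_wrprotect(pte_mkclean(pte_mkold(pte)));
}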
277static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
278{
279	pmdval_t v = native_pmd_val(pmd);
280
281	return __pmd(v | set);
282}
283
284static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
285{
286	pmdval_t v = native_pmd_val(pmd);
287
288	return __pmd(v & ~clear);
289}
290
291static inline pmd_t pmd_mkold(pmd_t pmd)
292{
293	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
294}
295
296static inline pmd_t pmd_mkclean(pmd_t pmd)
297{
298	return pmd_clear_flags(pmd, _PAGE_DIRTY);
299}
300
301static inline pmd_t pmd_wrprotect(pmd_t pmd)
302{
303	return pmd_clear_flags(pmd, _PAGE_RW);
304}
305
306static inline pmd_t pmd_mkdirty(pmd_t pmd)
307{
308	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
309}
310
311static inline pmd_t pmd_mkdevmap(pmd_t pmd)
312{
313	return pmd_set_flags(pmd, _PAGE_DEVMAP);
314}
315
316static inline pmd_t pmd_mkhuge(pmd_t pmd)
317{
318	return pmd_set_flags(pmd, _PAGE_PSE);
319}
320
321static inline pmd_t pmd_mkyoung(pmd_t pmd)
322{
323	return pmd_set_flags(pmd, _PAGE_ACCESSED);
324}
325
326static inline pmd_t pmd_mkwrite(pmd_t pmd)
327{
328	return pmd_set_flags(pmd, _PAGE_RW);
329}
330
331static inline pmd_t pmd_mknotpresent(pmd_t pmd)
332{
333	return pmd_clear_flags(pmd, _PAGE_PRESENT | _PAGE_PROTNONE);
334}
335
336#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
337static inline int pte_soft_dirty(pte_t pte)
338{
339	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
340}
341
342static inline int pmd_soft_dirty(pmd_t pmd)
343{
344	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
345}
346
347static inline pte_t pte_mksoft_dirty(pte_t pte)
348{
349	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
350}
351
352static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
353{
354	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
355}
356
357static inline pte_t pte_clear_soft_dirty(pte_t pte)
358{
359	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
360}
361
362static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
363{
364	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
365}
366
367#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
368
369/*
370 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
371 * can use those bits for other purposes, so leave them be.
372 */
373static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
374{
375	pgprotval_t protval = pgprot_val(pgprot);
376
377	if (protval & _PAGE_PRESENT)
378		protval &= __supported_pte_mask;
379
380	return protval;
381}
382
383static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
384{
385	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
386		     massage_pgprot(pgprot));
387}
388
389static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
390{
391	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
392		     massage_pgprot(pgprot));
393}
394
395static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
396{
397	pteval_t val = pte_val(pte);
398
399	/*
400	 * Chop off the NX bit (if present), and add the NX portion of
401	 * the newprot (if present):
402	 */
403	val &= _PAGE_CHG_MASK;
404	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;
405
406	return __pte(val);
407}
408
409static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
410{
411	pmdval_t val = pmd_val(pmd);
412
413	val &= _HPAGE_CHG_MASK;
414	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;
415
416	return __pmd(val);
417}
418
419/* mprotect needs to preserve PAT bits when updating vm_page_prot */
420#define pgprot_modify pgprot_modify
421static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
422{
423	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
424	pgprotval_t addbits = pgprot_val(newprot);
425	return __pgprot(preservebits | addbits);
426}
427
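/*
 * Editorial sketch: a protection change keeps the pfn plus the bits in
 * _PAGE_CHG_MASK and takes everything else from the new pgprot, which is
 * roughly what the generic mprotect() path does via pte_modify().  The
 * change_one_pte() helper below is hypothetical and skips the locking and
 * TLB flushing a real caller needs.
 */
static inline void change_one_pte(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, pgprot_t newprot)
{
	set_pte_at(mm, addr, ptep, pte_modify(*ptep, newprot));
}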
428#define pte_pgprot(x) __pgprot(pte_flags(x))
429#define pmd_pgprot(x) __pgprot(pmd_flags(x))
430#define pud_pgprot(x) __pgprot(pud_flags(x))
431
432#define canon_pgprot(p) __pgprot(massage_pgprot(p))
433
434static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
435					 enum page_cache_mode pcm,
436					 enum page_cache_mode new_pcm)
437{
438	/*
439	 * PAT type is always WB for untracked ranges, so no need to check.
440	 */
441	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
442		return 1;
443
444	/*
445	 * Certain new memtypes are not allowed with certain
446	 * requested memtype:
447	 * - request is uncached, return cannot be write-back
448	 * - request is write-combine, return cannot be write-back
449	 * - request is write-through, return cannot be write-back
450	 * - request is write-through, return cannot be write-combine
451	 */
452	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
453	     new_pcm == _PAGE_CACHE_MODE_WB) ||
454	    (pcm == _PAGE_CACHE_MODE_WC &&
455	     new_pcm == _PAGE_CACHE_MODE_WB) ||
456	    (pcm == _PAGE_CACHE_MODE_WT &&
457	     new_pcm == _PAGE_CACHE_MODE_WB) ||
458	    (pcm == _PAGE_CACHE_MODE_WT &&
459	     new_pcm == _PAGE_CACHE_MODE_WC)) {
460		return 0;
461	}
462
463	return 1;
464}
465
466pmd_t *populate_extra_pmd(unsigned long vaddr);
467pte_t *populate_extra_pte(unsigned long vaddr);
468#endif	/* __ASSEMBLY__ */
469
470#ifdef CONFIG_X86_32
471# include <asm/pgtable_32.h>
472#else
473# include <asm/pgtable_64.h>
474#endif
475
476#ifndef __ASSEMBLY__
477#include <linux/mm_types.h>
478#include <linux/mmdebug.h>
479#include <linux/log2.h>
480
481static inline int pte_none(pte_t pte)
482{
483	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
484}
485
486#define __HAVE_ARCH_PTE_SAME
487static inline int pte_same(pte_t a, pte_t b)
488{
489	return a.pte == b.pte;
490}
491
492static inline int pte_present(pte_t a)
493{
494	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
495}
496
497#ifdef __HAVE_ARCH_PTE_DEVMAP
498static inline int pte_devmap(pte_t a)
499{
500	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
501}
502#endif
503
504#define pte_accessible pte_accessible
505static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
506{
507	if (pte_flags(a) & _PAGE_PRESENT)
508		return true;
509
510	if ((pte_flags(a) & _PAGE_PROTNONE) &&
511			mm_tlb_flush_pending(mm))
512		return true;
513
514	return false;
515}
516
517static inline int pte_hidden(pte_t pte)
518{
519	return pte_flags(pte) & _PAGE_HIDDEN;
520}
521
522static inline int pmd_present(pmd_t pmd)
523{
524	/*
525	 * Checking for _PAGE_PSE is needed too because
526	 * split_huge_page will temporarily clear the present bit (but
527	 * the _PAGE_PSE flag will remain set at all times while the
528	 * _PAGE_PRESENT bit is clear).
529	 */
530	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
531}
532
533#ifdef CONFIG_NUMA_BALANCING
534/*
535 * These work without NUMA balancing but the kernel does not care. See the
536 * comment in include/asm-generic/pgtable.h
537 */
538static inline int pte_protnone(pte_t pte)
539{
540	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
541		== _PAGE_PROTNONE;
542}
543
544static inline int pmd_protnone(pmd_t pmd)
545{
546	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
547		== _PAGE_PROTNONE;
548}
549#endif /* CONFIG_NUMA_BALANCING */
550
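/*
 * Editorial note: a NUMA hinting entry has _PAGE_PROTNONE set and
 * _PAGE_PRESENT clear.  Because pte_present() above also tests
 * _PAGE_PROTNONE, such an entry still counts as present, while
 * pte_protnone() tells it apart from an ordinarily mapped page:
 *
 *	pte_present(pte)  -> true   (via _PAGE_PROTNONE)
 *	pte_protnone(pte) -> true   (PROTNONE set, PRESENT clear)
 */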
551static inline int pmd_none(pmd_t pmd)
552{
553	/* Only check low word on 32-bit platforms, since it might be
554	   out of sync with upper half. */
555	unsigned long val = native_pmd_val(pmd);
556	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
557}
558
559static inline unsigned long pmd_page_vaddr(pmd_t pmd)
560{
561	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
562}
563
564/*
565 * Currently stuck as a macro due to indirect forward reference to
566 * linux/mmzone.h's __section_mem_map_addr() definition:
567 */
568#define pmd_page(pmd)		\
569	pfn_to_page((pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT)
570
571/*
572 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
573 *
574 * this macro returns the index of the entry in the pmd page which would
575 * control the given virtual address
576 */
577static inline unsigned long pmd_index(unsigned long address)
578{
579	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
580}
581
582/*
583 * Conversion functions: convert a page and protection to a page entry,
584 * and a page entry and page directory to the page they refer to.
585 *
586 * (Currently stuck as a macro because of indirect forward reference
587 * to linux/mm.h:page_to_nid())
588 */
589#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
590
591/*
592 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
593 *
594 * this function returns the index of the entry in the pte page which would
595 * control the given virtual address
596 */
597static inline unsigned long pte_index(unsigned long address)
598{
599	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
600}
601
602static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
603{
604	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
605}
606
607static inline int pmd_bad(pmd_t pmd)
608{
609	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
610}
611
612static inline unsigned long pages_to_mb(unsigned long npg)
613{
614	return npg >> (20 - PAGE_SHIFT);
615}
616
617#if CONFIG_PGTABLE_LEVELS > 2
618static inline int pud_none(pud_t pud)
619{
620	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
621}
622
623static inline int pud_present(pud_t pud)
624{
625	return pud_flags(pud) & _PAGE_PRESENT;
626}
627
628static inline unsigned long pud_page_vaddr(pud_t pud)
629{
630	return (unsigned long)__va(pud_val(pud) & pud_pfn_mask(pud));
631}
632
633/*
634 * Currently stuck as a macro due to indirect forward reference to
635 * linux/mmzone.h's __section_mem_map_addr() definition:
636 */
637#define pud_page(pud)		\
638	pfn_to_page((pud_val(pud) & pud_pfn_mask(pud)) >> PAGE_SHIFT)
639
640/* Find an entry in the second-level page table.. */
641static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
642{
643	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
644}
645
646static inline int pud_large(pud_t pud)
647{
648	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
649		(_PAGE_PSE | _PAGE_PRESENT);
650}
651
652static inline int pud_bad(pud_t pud)
653{
654	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
655}
656#else
657static inline int pud_large(pud_t pud)
658{
659	return 0;
660}
661#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
662
663#if CONFIG_PGTABLE_LEVELS > 3
664static inline int pgd_present(pgd_t pgd)
665{
666	return pgd_flags(pgd) & _PAGE_PRESENT;
667}
668
669static inline unsigned long pgd_page_vaddr(pgd_t pgd)
670{
671	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
672}
673
674/*
675 * Currently stuck as a macro due to indirect forward reference to
676 * linux/mmzone.h's __section_mem_map_addr() definition:
677 */
678#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
679
680/* to find an entry in a page-table-directory. */
681static inline unsigned long pud_index(unsigned long address)
682{
683	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
684}
685
686static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
687{
688	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
689}
690
691static inline int pgd_bad(pgd_t pgd)
692{
693	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
694}
695
696static inline int pgd_none(pgd_t pgd)
697{
698	/*
699	 * There is no need to do a workaround for the KNL stray
700	 * A/D bit erratum here.  PGDs only point to page tables
701	 * except on 32-bit non-PAE which is not supported on
702	 * KNL.
703	 */
704	return !native_pgd_val(pgd);
705}
706#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
707
708#endif	/* __ASSEMBLY__ */
709
710/*
711 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
712 *
713 * this macro returns the index of the entry in the pgd page which would
714 * control the given virtual address
715 */
716#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
717
718/*
719 * pgd_offset() returns a (pgd_t *)
720 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
721 */
722#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
723/*
724 * a shortcut which implies the use of the kernel's pgd, instead
725 * of a process's
726 */
727#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
728
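/*
 * Editorial sketch: a minimal software walk of the kernel page tables for a
 * kernel virtual address, built from the helpers in this header (plus the
 * folded variants the generic code provides on two- and three-level
 * configurations).  lookup_kernel_pfn() is hypothetical and does no locking;
 * returning 0 simply means "not mapped" here.
 */
static unsigned long lookup_kernel_pfn(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return 0;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;
	if (pud_large(*pud))		/* 1 GiB mapping */
		return pud_pfn(*pud);
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;
	if (pmd_large(*pmd))		/* 2 MiB (or 4 MiB) mapping */
		return pmd_pfn(*pmd);
	pte = pte_offset_kernel(pmd, addr);
	if (!pte_present(*pte))
		return 0;
	return pte_pfn(*pte);
}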
729
730#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
731#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
732
733#ifndef __ASSEMBLY__
734
735extern int direct_gbpages;
736void init_mem_mapping(void);
737void early_alloc_pgt_buf(void);
738
739#ifdef CONFIG_X86_64
740/* Realmode trampoline initialization. */
741extern pgd_t trampoline_pgd_entry;
742static inline void __meminit init_trampoline_default(void)
743{
744	/* Default trampoline pgd value */
745	trampoline_pgd_entry = init_level4_pgt[pgd_index(__PAGE_OFFSET)];
746}
747# ifdef CONFIG_RANDOMIZE_MEMORY
748void __meminit init_trampoline(void);
749# else
750#  define init_trampoline init_trampoline_default
751# endif
752#else
753static inline void init_trampoline(void) { }
754#endif
755
756/* local pte updates need not use xchg for locking */
757static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
758{
759	pte_t res = *ptep;
760
761	/* Pure native function needs no input for mm, addr */
762	native_pte_clear(NULL, 0, ptep);
763	return res;
764}
765
766static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
767{
768	pmd_t res = *pmdp;
769
770	native_pmd_clear(pmdp);
771	return res;
772}
773
774static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
775				     pte_t *ptep , pte_t pte)
776{
777	native_set_pte(ptep, pte);
778}
779
780static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
781				     pmd_t *pmdp , pmd_t pmd)
782{
783	native_set_pmd(pmdp, pmd);
784}
785
786#ifndef CONFIG_PARAVIRT
787/*
788 * Rules for using pte_update - it must be called after any PTE update which
789 * has not been done using the set_pte / clear_pte interfaces.  It is used by
790 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
791 * updates should either be sets, clears, or set_pte_atomic for P->P
792 * transitions, which means this hook should only be called for user PTEs.
793 * This hook implies a P->P protection or access change has taken place, which
794 * requires a subsequent TLB flush.
795 */
796#define pte_update(mm, addr, ptep)		do { } while (0)
797#endif
798
799/*
800 * We only update the dirty/accessed state if we set
801 * the dirty bit by hand in the kernel, since the hardware
802 * will do the accessed bit for us, and we don't want to
803 * race with other CPU's that might be updating the dirty
804 * bit at the same time.
805 */
806struct vm_area_struct;
807
808#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
809extern int ptep_set_access_flags(struct vm_area_struct *vma,
810				 unsigned long address, pte_t *ptep,
811				 pte_t entry, int dirty);
812
813#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
814extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
815				     unsigned long addr, pte_t *ptep);
816
817#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
818extern int ptep_clear_flush_young(struct vm_area_struct *vma,
819				  unsigned long address, pte_t *ptep);
820
821#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
822static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
823				       pte_t *ptep)
824{
825	pte_t pte = native_ptep_get_and_clear(ptep);
826	pte_update(mm, addr, ptep);
827	return pte;
828}
829
830#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
831static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
832					    unsigned long addr, pte_t *ptep,
833					    int full)
834{
835	pte_t pte;
836	if (full) {
837		/*
838		 * Full address destruction in progress; paravirt does not
839		 * care about updates and native needs no locking
840		 */
841		pte = native_local_ptep_get_and_clear(ptep);
842	} else {
843		pte = ptep_get_and_clear(mm, addr, ptep);
844	}
845	return pte;
846}
847
848#define __HAVE_ARCH_PTEP_SET_WRPROTECT
849static inline void ptep_set_wrprotect(struct mm_struct *mm,
850				      unsigned long addr, pte_t *ptep)
851{
852	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
853	pte_update(mm, addr, ptep);
854}
855
856#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
857
858#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
859
860#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
861extern int pmdp_set_access_flags(struct vm_area_struct *vma,
862				 unsigned long address, pmd_t *pmdp,
863				 pmd_t entry, int dirty);
864
865#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
866extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
867				     unsigned long addr, pmd_t *pmdp);
868
869#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
870extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
871				  unsigned long address, pmd_t *pmdp);
872
873
874#define __HAVE_ARCH_PMD_WRITE
875static inline int pmd_write(pmd_t pmd)
876{
877	return pmd_flags(pmd) & _PAGE_RW;
878}
879
880#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
881static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
882				       pmd_t *pmdp)
883{
884	return native_pmdp_get_and_clear(pmdp);
885}
886
887#define __HAVE_ARCH_PMDP_SET_WRPROTECT
888static inline void pmdp_set_wrprotect(struct mm_struct *mm,
889				      unsigned long addr, pmd_t *pmdp)
890{
891	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
892}
893
894/*
895 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
896 *
897 *  dst - pointer to pgd range anywhere on a pgd page
898 *  src - ""
899 *  count - the number of pgds to copy.
900 *
901 * dst and src can be on the same page, but the range must not overlap,
902 * and must not cross a page boundary.
903 */
904static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
905{
906       memcpy(dst, src, count * sizeof(pgd_t));
907}
908
909#define PTE_SHIFT ilog2(PTRS_PER_PTE)
910static inline int page_level_shift(enum pg_level level)
911{
912	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
913}
914static inline unsigned long page_level_size(enum pg_level level)
915{
916	return 1UL << page_level_shift(level);
917}
918static inline unsigned long page_level_mask(enum pg_level level)
919{
920	return ~(page_level_size(level) - 1);
921}
922
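/*
 * Editorial worked example, assuming 64-bit page tables (PTRS_PER_PTE == 512,
 * so PTE_SHIFT == 9) and the usual enum pg_level values PG_LEVEL_4K == 1,
 * PG_LEVEL_2M == 2, PG_LEVEL_1G == 3:
 *
 *	page_level_shift(PG_LEVEL_4K) = (12 - 9) + 1 * 9 = 12  ->  4 KiB
 *	page_level_shift(PG_LEVEL_2M) = (12 - 9) + 2 * 9 = 21  ->  2 MiB
 *	page_level_shift(PG_LEVEL_1G) = (12 - 9) + 3 * 9 = 30  ->  1 GiB
 *
 * page_level_mask() then rounds an address down to the start of a mapping
 * at that level.
 */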
923/*
924 * The x86 doesn't have any external MMU info: the kernel page
925 * tables contain all the necessary information.
926 */
927static inline void update_mmu_cache(struct vm_area_struct *vma,
928		unsigned long addr, pte_t *ptep)
929{
930}
931static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
932		unsigned long addr, pmd_t *pmd)
933{
934}
935
936#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
937static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
938{
939	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
940}
941
942static inline int pte_swp_soft_dirty(pte_t pte)
943{
944	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
945}
946
947static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
948{
949	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
950}
951#endif
952
953#define PKRU_AD_BIT 0x1
954#define PKRU_WD_BIT 0x2
955#define PKRU_BITS_PER_PKEY 2
956
957static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
958{
959	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
960	return !(pkru & (PKRU_AD_BIT << pkru_pkey_bits));
961}
962
963static inline bool __pkru_allows_write(u32 pkru, u16 pkey)
964{
965	int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
966	/*
967	 * Access-disable disables writes too so we need to check
968	 * both bits here.
969	 */
970	return !(pkru & ((PKRU_AD_BIT|PKRU_WD_BIT) << pkru_pkey_bits));
971}
972
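/*
 * Editorial worked example: each pkey owns two adjacent PKRU bits, AD at
 * bit 2*pkey and WD at bit 2*pkey + 1.  For pkey 1:
 *
 *	pkru = 0x4 (AD set) -> __pkru_allows_read()  == false
 *	                       __pkru_allows_write() == false
 *	pkru = 0x8 (WD set) -> __pkru_allows_read()  == true
 *	                       __pkru_allows_write() == false
 */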
973static inline u16 pte_flags_pkey(unsigned long pte_flags)
974{
975#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
976	/* ifdef to avoid doing 59-bit shift on 32-bit values */
977	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
978#else
979	return 0;
980#endif
981}
982
983#include <asm-generic/pgtable.h>
984#endif	/* __ASSEMBLY__ */
985
986#endif /* _ASM_X86_PGTABLE_H */

arch/x86/include/asm/pgtable.h (Linux v6.2)
   1/* SPDX-License-Identifier: GPL-2.0 */
   2#ifndef _ASM_X86_PGTABLE_H
   3#define _ASM_X86_PGTABLE_H
   4
   5#include <linux/mem_encrypt.h>
   6#include <asm/page.h>
   7#include <asm/pgtable_types.h>
   8
   9/*
  10 * Macro to mark a page protection value as UC-
  11 */
  12#define pgprot_noncached(prot)						\
  13	((boot_cpu_data.x86 > 3)					\
  14	 ? (__pgprot(pgprot_val(prot) |					\
  15		     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))	\
  16	 : (prot))
  17
  18#ifndef __ASSEMBLY__
  19#include <linux/spinlock.h>
  20#include <asm/x86_init.h>
  21#include <asm/pkru.h>
  22#include <asm/fpu/api.h>
  23#include <asm/coco.h>
  24#include <asm-generic/pgtable_uffd.h>
  25#include <linux/page_table_check.h>
  26
  27extern pgd_t early_top_pgt[PTRS_PER_PGD];
  28bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd);
  29
  30void ptdump_walk_pgd_level(struct seq_file *m, struct mm_struct *mm);
  31void ptdump_walk_pgd_level_debugfs(struct seq_file *m, struct mm_struct *mm,
  32				   bool user);
  33void ptdump_walk_pgd_level_checkwx(void);
  34void ptdump_walk_user_pgd_level_checkwx(void);
  35
  36/*
  37 * Macros to add or remove encryption attribute
  38 */
  39#define pgprot_encrypted(prot)	__pgprot(cc_mkenc(pgprot_val(prot)))
  40#define pgprot_decrypted(prot)	__pgprot(cc_mkdec(pgprot_val(prot)))
  41
  42#ifdef CONFIG_DEBUG_WX
  43#define debug_checkwx()		ptdump_walk_pgd_level_checkwx()
  44#define debug_checkwx_user()	ptdump_walk_user_pgd_level_checkwx()
  45#else
  46#define debug_checkwx()		do { } while (0)
  47#define debug_checkwx_user()	do { } while (0)
  48#endif
  49
  50/*
  51 * ZERO_PAGE is a global shared page that is always zero: used
  52 * for zero-mapped memory areas etc..
  53 */
  54extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
  55	__visible;
  56#define ZERO_PAGE(vaddr) ((void)(vaddr),virt_to_page(empty_zero_page))
  57
  58extern spinlock_t pgd_lock;
  59extern struct list_head pgd_list;
  60
  61extern struct mm_struct *pgd_page_get_mm(struct page *page);
  62
  63extern pmdval_t early_pmd_flags;
  64
  65#ifdef CONFIG_PARAVIRT_XXL
  66#include <asm/paravirt.h>
  67#else  /* !CONFIG_PARAVIRT_XXL */
  68#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
  69
  70#define set_pte_atomic(ptep, pte)					\
  71	native_set_pte_atomic(ptep, pte)
  72
  73#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)
  74
  75#ifndef __PAGETABLE_P4D_FOLDED
  76#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
  77#define pgd_clear(pgd)			(pgtable_l5_enabled() ? native_pgd_clear(pgd) : 0)
  78#endif
  79
  80#ifndef set_p4d
  81# define set_p4d(p4dp, p4d)		native_set_p4d(p4dp, p4d)
  82#endif
  83
  84#ifndef __PAGETABLE_PUD_FOLDED
  85#define p4d_clear(p4d)			native_p4d_clear(p4d)
  86#endif
  87
  88#ifndef set_pud
  89# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
  90#endif
  91
  92#ifndef __PAGETABLE_PUD_FOLDED
  93#define pud_clear(pud)			native_pud_clear(pud)
  94#endif
  95
  96#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
  97#define pmd_clear(pmd)			native_pmd_clear(pmd)
  98
  99#define pgd_val(x)	native_pgd_val(x)
 100#define __pgd(x)	native_make_pgd(x)
 101
 102#ifndef __PAGETABLE_P4D_FOLDED
 103#define p4d_val(x)	native_p4d_val(x)
 104#define __p4d(x)	native_make_p4d(x)
 105#endif
 106
 107#ifndef __PAGETABLE_PUD_FOLDED
 108#define pud_val(x)	native_pud_val(x)
 109#define __pud(x)	native_make_pud(x)
 110#endif
 111
 112#ifndef __PAGETABLE_PMD_FOLDED
 113#define pmd_val(x)	native_pmd_val(x)
 114#define __pmd(x)	native_make_pmd(x)
 115#endif
 116
 117#define pte_val(x)	native_pte_val(x)
 118#define __pte(x)	native_make_pte(x)
 119
 120#define arch_end_context_switch(prev)	do {} while(0)
 121#endif	/* CONFIG_PARAVIRT_XXL */
 122
 123/*
 124 * The following only work if pte_present() is true.
 125 * Undefined behaviour if not..
 126 */
 127static inline int pte_dirty(pte_t pte)
 128{
 129	return pte_flags(pte) & _PAGE_DIRTY;
 130}
 131
 132static inline int pte_young(pte_t pte)
 133{
 134	return pte_flags(pte) & _PAGE_ACCESSED;
 135}
 136
 137static inline int pmd_dirty(pmd_t pmd)
 138{
 139	return pmd_flags(pmd) & _PAGE_DIRTY;
 140}
 141
 142#define pmd_young pmd_young
 143static inline int pmd_young(pmd_t pmd)
 144{
 145	return pmd_flags(pmd) & _PAGE_ACCESSED;
 146}
 147
 148static inline int pud_dirty(pud_t pud)
 149{
 150	return pud_flags(pud) & _PAGE_DIRTY;
 151}
 152
 153static inline int pud_young(pud_t pud)
 154{
 155	return pud_flags(pud) & _PAGE_ACCESSED;
 156}
 157
 158static inline int pte_write(pte_t pte)
 159{
 160	return pte_flags(pte) & _PAGE_RW;
 161}
 162
 163static inline int pte_huge(pte_t pte)
 164{
 165	return pte_flags(pte) & _PAGE_PSE;
 166}
 167
 168static inline int pte_global(pte_t pte)
 169{
 170	return pte_flags(pte) & _PAGE_GLOBAL;
 171}
 172
 173static inline int pte_exec(pte_t pte)
 174{
 175	return !(pte_flags(pte) & _PAGE_NX);
 176}
 177
 178static inline int pte_special(pte_t pte)
 179{
 180	return pte_flags(pte) & _PAGE_SPECIAL;
 181}
 182
 183/* Entries that were set to PROT_NONE are inverted */
 184
 185static inline u64 protnone_mask(u64 val);
 186
 187static inline unsigned long pte_pfn(pte_t pte)
 188{
 189	phys_addr_t pfn = pte_val(pte);
 190	pfn ^= protnone_mask(pfn);
 191	return (pfn & PTE_PFN_MASK) >> PAGE_SHIFT;
 192}
 193
 194static inline unsigned long pmd_pfn(pmd_t pmd)
 195{
 196	phys_addr_t pfn = pmd_val(pmd);
 197	pfn ^= protnone_mask(pfn);
 198	return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
 199}
 200
 201static inline unsigned long pud_pfn(pud_t pud)
 202{
 203	phys_addr_t pfn = pud_val(pud);
 204	pfn ^= protnone_mask(pfn);
 205	return (pfn & pud_pfn_mask(pud)) >> PAGE_SHIFT;
 206}
 207
 208static inline unsigned long p4d_pfn(p4d_t p4d)
 209{
 210	return (p4d_val(p4d) & p4d_pfn_mask(p4d)) >> PAGE_SHIFT;
 211}
 212
 213static inline unsigned long pgd_pfn(pgd_t pgd)
 214{
 215	return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
 216}
 217
 218#define p4d_leaf	p4d_large
 219static inline int p4d_large(p4d_t p4d)
 220{
 221	/* No 512 GiB pages yet */
 222	return 0;
 223}
 224
 225#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
 226
 227#define pmd_leaf	pmd_large
 228static inline int pmd_large(pmd_t pte)
 229{
 230	return pmd_flags(pte) & _PAGE_PSE;
 231}
 232
 233#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 234/* NOTE: when predicate huge page, consider also pmd_devmap, or use pmd_large */
 235static inline int pmd_trans_huge(pmd_t pmd)
 236{
 237	return (pmd_val(pmd) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 238}
 239
 240#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 241static inline int pud_trans_huge(pud_t pud)
 242{
 243	return (pud_val(pud) & (_PAGE_PSE|_PAGE_DEVMAP)) == _PAGE_PSE;
 244}
 245#endif
 246
 247#define has_transparent_hugepage has_transparent_hugepage
 248static inline int has_transparent_hugepage(void)
 249{
 250	return boot_cpu_has(X86_FEATURE_PSE);
 251}
 252
 253#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
 254static inline int pmd_devmap(pmd_t pmd)
 255{
 256	return !!(pmd_val(pmd) & _PAGE_DEVMAP);
 257}
 258
 259#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 260static inline int pud_devmap(pud_t pud)
 261{
 262	return !!(pud_val(pud) & _PAGE_DEVMAP);
 263}
 264#else
 265static inline int pud_devmap(pud_t pud)
 266{
 267	return 0;
 268}
 269#endif
 270
 271static inline int pgd_devmap(pgd_t pgd)
 272{
 273	return 0;
 274}
 275#endif
 276#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 277
 278static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
 279{
 280	pteval_t v = native_pte_val(pte);
 281
 282	return native_make_pte(v | set);
 283}
 284
 285static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
 286{
 287	pteval_t v = native_pte_val(pte);
 288
 289	return native_make_pte(v & ~clear);
 290}
 291
 292#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
 293static inline int pte_uffd_wp(pte_t pte)
 294{
 295	bool wp = pte_flags(pte) & _PAGE_UFFD_WP;
 296
 297#ifdef CONFIG_DEBUG_VM
 298	/*
 299	 * Having write bit for wr-protect-marked present ptes is fatal,
 300	 * because it means the uffd-wp bit will be ignored and write will
 301	 * just go through.
 302	 *
 303	 * Use any chance of pgtable walking to verify this (e.g., when
 304	 * page swapped out or being migrated for all purposes). It means
 305	 * something is already wrong.  Tell the admin even before the
 306	 * process crashes. We also nail it with wrong pgtable setup.
 307	 */
 308	WARN_ON_ONCE(wp && pte_write(pte));
 309#endif
 310
 311	return wp;
 312}
 313
 314static inline pte_t pte_mkuffd_wp(pte_t pte)
 315{
 316	return pte_set_flags(pte, _PAGE_UFFD_WP);
 317}
 318
 319static inline pte_t pte_clear_uffd_wp(pte_t pte)
 320{
 321	return pte_clear_flags(pte, _PAGE_UFFD_WP);
 322}
 323#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
 324
 325static inline pte_t pte_mkclean(pte_t pte)
 326{
 327	return pte_clear_flags(pte, _PAGE_DIRTY);
 328}
 329
 330static inline pte_t pte_mkold(pte_t pte)
 331{
 332	return pte_clear_flags(pte, _PAGE_ACCESSED);
 333}
 334
 335static inline pte_t pte_wrprotect(pte_t pte)
 336{
 337	return pte_clear_flags(pte, _PAGE_RW);
 338}
 339
 340static inline pte_t pte_mkexec(pte_t pte)
 341{
 342	return pte_clear_flags(pte, _PAGE_NX);
 343}
 344
 345static inline pte_t pte_mkdirty(pte_t pte)
 346{
 347	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 348}
 349
 350static inline pte_t pte_mkyoung(pte_t pte)
 351{
 352	return pte_set_flags(pte, _PAGE_ACCESSED);
 353}
 354
 355static inline pte_t pte_mkwrite(pte_t pte)
 356{
 357	return pte_set_flags(pte, _PAGE_RW);
 358}
 359
 360static inline pte_t pte_mkhuge(pte_t pte)
 361{
 362	return pte_set_flags(pte, _PAGE_PSE);
 363}
 364
 365static inline pte_t pte_clrhuge(pte_t pte)
 366{
 367	return pte_clear_flags(pte, _PAGE_PSE);
 368}
 369
 370static inline pte_t pte_mkglobal(pte_t pte)
 371{
 372	return pte_set_flags(pte, _PAGE_GLOBAL);
 373}
 374
 375static inline pte_t pte_clrglobal(pte_t pte)
 376{
 377	return pte_clear_flags(pte, _PAGE_GLOBAL);
 378}
 379
 380static inline pte_t pte_mkspecial(pte_t pte)
 381{
 382	return pte_set_flags(pte, _PAGE_SPECIAL);
 383}
 384
 385static inline pte_t pte_mkdevmap(pte_t pte)
 386{
 387	return pte_set_flags(pte, _PAGE_SPECIAL|_PAGE_DEVMAP);
 388}
 389
 390static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
 391{
 392	pmdval_t v = native_pmd_val(pmd);
 393
 394	return native_make_pmd(v | set);
 395}
 396
 397static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
 398{
 399	pmdval_t v = native_pmd_val(pmd);
 400
 401	return native_make_pmd(v & ~clear);
 402}
 403
 404#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
 405static inline int pmd_uffd_wp(pmd_t pmd)
 406{
 407	return pmd_flags(pmd) & _PAGE_UFFD_WP;
 408}
 409
 410static inline pmd_t pmd_mkuffd_wp(pmd_t pmd)
 411{
 412	return pmd_set_flags(pmd, _PAGE_UFFD_WP);
 413}
 414
 415static inline pmd_t pmd_clear_uffd_wp(pmd_t pmd)
 416{
 417	return pmd_clear_flags(pmd, _PAGE_UFFD_WP);
 418}
 419#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
 420
 421static inline pmd_t pmd_mkold(pmd_t pmd)
 422{
 423	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
 424}
 425
 426static inline pmd_t pmd_mkclean(pmd_t pmd)
 427{
 428	return pmd_clear_flags(pmd, _PAGE_DIRTY);
 429}
 430
 431static inline pmd_t pmd_wrprotect(pmd_t pmd)
 432{
 433	return pmd_clear_flags(pmd, _PAGE_RW);
 434}
 435
 436static inline pmd_t pmd_mkdirty(pmd_t pmd)
 437{
 438	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 439}
 440
 441static inline pmd_t pmd_mkdevmap(pmd_t pmd)
 442{
 443	return pmd_set_flags(pmd, _PAGE_DEVMAP);
 444}
 445
 446static inline pmd_t pmd_mkhuge(pmd_t pmd)
 447{
 448	return pmd_set_flags(pmd, _PAGE_PSE);
 449}
 450
 451static inline pmd_t pmd_mkyoung(pmd_t pmd)
 452{
 453	return pmd_set_flags(pmd, _PAGE_ACCESSED);
 454}
 455
 456static inline pmd_t pmd_mkwrite(pmd_t pmd)
 457{
 458	return pmd_set_flags(pmd, _PAGE_RW);
 459}
 460
 461static inline pud_t pud_set_flags(pud_t pud, pudval_t set)
 462{
 463	pudval_t v = native_pud_val(pud);
 464
 465	return native_make_pud(v | set);
 466}
 467
 468static inline pud_t pud_clear_flags(pud_t pud, pudval_t clear)
 469{
 470	pudval_t v = native_pud_val(pud);
 471
 472	return native_make_pud(v & ~clear);
 473}
 474
 475static inline pud_t pud_mkold(pud_t pud)
 476{
 477	return pud_clear_flags(pud, _PAGE_ACCESSED);
 478}
 479
 480static inline pud_t pud_mkclean(pud_t pud)
 481{
 482	return pud_clear_flags(pud, _PAGE_DIRTY);
 483}
 484
 485static inline pud_t pud_wrprotect(pud_t pud)
 486{
 487	return pud_clear_flags(pud, _PAGE_RW);
 488}
 489
 490static inline pud_t pud_mkdirty(pud_t pud)
 491{
 492	return pud_set_flags(pud, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
 493}
 494
 495static inline pud_t pud_mkdevmap(pud_t pud)
 496{
 497	return pud_set_flags(pud, _PAGE_DEVMAP);
 498}
 499
 500static inline pud_t pud_mkhuge(pud_t pud)
 501{
 502	return pud_set_flags(pud, _PAGE_PSE);
 503}
 504
 505static inline pud_t pud_mkyoung(pud_t pud)
 506{
 507	return pud_set_flags(pud, _PAGE_ACCESSED);
 508}
 509
 510static inline pud_t pud_mkwrite(pud_t pud)
 511{
 512	return pud_set_flags(pud, _PAGE_RW);
 513}
 514
 515#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
 516static inline int pte_soft_dirty(pte_t pte)
 517{
 518	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
 519}
 520
 521static inline int pmd_soft_dirty(pmd_t pmd)
 522{
 523	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
 524}
 525
 526static inline int pud_soft_dirty(pud_t pud)
 527{
 528	return pud_flags(pud) & _PAGE_SOFT_DIRTY;
 529}
 530
 531static inline pte_t pte_mksoft_dirty(pte_t pte)
 532{
 533	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
 534}
 535
 536static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
 537{
 538	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
 539}
 540
 541static inline pud_t pud_mksoft_dirty(pud_t pud)
 542{
 543	return pud_set_flags(pud, _PAGE_SOFT_DIRTY);
 544}
 545
 546static inline pte_t pte_clear_soft_dirty(pte_t pte)
 547{
 548	return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
 549}
 550
 551static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
 552{
 553	return pmd_clear_flags(pmd, _PAGE_SOFT_DIRTY);
 554}
 555
 556static inline pud_t pud_clear_soft_dirty(pud_t pud)
 557{
 558	return pud_clear_flags(pud, _PAGE_SOFT_DIRTY);
 559}
 560
 561#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */
 562
 563/*
 564 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 565 * can use those bits for other purposes, so leave them be.
 566 */
 567static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
 568{
 569	pgprotval_t protval = pgprot_val(pgprot);
 570
 571	if (protval & _PAGE_PRESENT)
 572		protval &= __supported_pte_mask;
 573
 574	return protval;
 575}
 576
 577static inline pgprotval_t check_pgprot(pgprot_t pgprot)
 578{
 579	pgprotval_t massaged_val = massage_pgprot(pgprot);
 580
 581	/* mmdebug.h can not be included here because of dependencies */
 582#ifdef CONFIG_DEBUG_VM
 583	WARN_ONCE(pgprot_val(pgprot) != massaged_val,
 584		  "attempted to set unsupported pgprot: %016llx "
 585		  "bits: %016llx supported: %016llx\n",
 586		  (u64)pgprot_val(pgprot),
 587		  (u64)pgprot_val(pgprot) ^ massaged_val,
 588		  (u64)__supported_pte_mask);
 589#endif
 590
 591	return massaged_val;
 592}
 593
 594static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
 595{
 596	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
 597	pfn ^= protnone_mask(pgprot_val(pgprot));
 598	pfn &= PTE_PFN_MASK;
 599	return __pte(pfn | check_pgprot(pgprot));
 600}
 601
 602static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
 603{
 604	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
 605	pfn ^= protnone_mask(pgprot_val(pgprot));
 606	pfn &= PHYSICAL_PMD_PAGE_MASK;
 607	return __pmd(pfn | check_pgprot(pgprot));
 608}
 609
 610static inline pud_t pfn_pud(unsigned long page_nr, pgprot_t pgprot)
 611{
 612	phys_addr_t pfn = (phys_addr_t)page_nr << PAGE_SHIFT;
 613	pfn ^= protnone_mask(pgprot_val(pgprot));
 614	pfn &= PHYSICAL_PUD_PAGE_MASK;
 615	return __pud(pfn | check_pgprot(pgprot));
 616}
 617
 618static inline pmd_t pmd_mkinvalid(pmd_t pmd)
 619{
 620	return pfn_pmd(pmd_pfn(pmd),
 621		      __pgprot(pmd_flags(pmd) & ~(_PAGE_PRESENT|_PAGE_PROTNONE)));
 622}
 623
 624static inline u64 flip_protnone_guard(u64 oldval, u64 val, u64 mask);
 625
 626static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 627{
 628	pteval_t val = pte_val(pte), oldval = val;
 629
 630	/*
 631	 * Chop off the NX bit (if present), and add the NX portion of
 632	 * the newprot (if present):
 633	 */
 634	val &= _PAGE_CHG_MASK;
 635	val |= check_pgprot(newprot) & ~_PAGE_CHG_MASK;
 636	val = flip_protnone_guard(oldval, val, PTE_PFN_MASK);
 637	return __pte(val);
 638}
 639
 640static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 641{
 642	pmdval_t val = pmd_val(pmd), oldval = val;
 643
 644	val &= _HPAGE_CHG_MASK;
 645	val |= check_pgprot(newprot) & ~_HPAGE_CHG_MASK;
 646	val = flip_protnone_guard(oldval, val, PHYSICAL_PMD_PAGE_MASK);
 647	return __pmd(val);
 648}
 649
 650/*
 651 * mprotect needs to preserve PAT and encryption bits when updating
 652 * vm_page_prot
 653 */
 654#define pgprot_modify pgprot_modify
 655static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
 656{
 657	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
 658	pgprotval_t addbits = pgprot_val(newprot) & ~_PAGE_CHG_MASK;
 659	return __pgprot(preservebits | addbits);
 660}
 661
 662#define pte_pgprot(x) __pgprot(pte_flags(x))
 663#define pmd_pgprot(x) __pgprot(pmd_flags(x))
 664#define pud_pgprot(x) __pgprot(pud_flags(x))
 665#define p4d_pgprot(x) __pgprot(p4d_flags(x))
 666
 667#define canon_pgprot(p) __pgprot(massage_pgprot(p))
 668
 669static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
 670					 enum page_cache_mode pcm,
 671					 enum page_cache_mode new_pcm)
 672{
 673	/*
 674	 * PAT type is always WB for untracked ranges, so no need to check.
 675	 */
 676	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
 677		return 1;
 678
 679	/*
 680	 * Certain new memtypes are not allowed with certain
 681	 * requested memtype:
 682	 * - request is uncached, return cannot be write-back
 683	 * - request is write-combine, return cannot be write-back
 684	 * - request is write-through, return cannot be write-back
 685	 * - request is write-through, return cannot be write-combine
 686	 */
 687	if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
 688	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 689	    (pcm == _PAGE_CACHE_MODE_WC &&
 690	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 691	    (pcm == _PAGE_CACHE_MODE_WT &&
 692	     new_pcm == _PAGE_CACHE_MODE_WB) ||
 693	    (pcm == _PAGE_CACHE_MODE_WT &&
 694	     new_pcm == _PAGE_CACHE_MODE_WC)) {
 695		return 0;
 696	}
 697
 698	return 1;
 699}
 700
 701pmd_t *populate_extra_pmd(unsigned long vaddr);
 702pte_t *populate_extra_pte(unsigned long vaddr);
 703
 704#ifdef CONFIG_PAGE_TABLE_ISOLATION
 705pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd);
 706
 707/*
 708 * Take a PGD location (pgdp) and a pgd value that needs to be set there.
 709 * Populates the user and returns the resulting PGD that must be set in
 710 * the kernel copy of the page tables.
 711 */
 712static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 713{
 714	if (!static_cpu_has(X86_FEATURE_PTI))
 715		return pgd;
 716	return __pti_set_user_pgtbl(pgdp, pgd);
 717}
 718#else   /* CONFIG_PAGE_TABLE_ISOLATION */
 719static inline pgd_t pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 720{
 721	return pgd;
 722}
 723#endif  /* CONFIG_PAGE_TABLE_ISOLATION */
 724
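/*
 * Editorial note: with PTI each process has a kernel and a user copy of the
 * top-level page table.  The 64-bit set_pgd() path (in pgtable_64.h) routes
 * every kernel PGD write through pti_set_user_pgtbl(), roughly like this
 * sketch, so the user copy is populated and the returned value lands in the
 * kernel copy:
 *
 *	static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
 *	{
 *		WRITE_ONCE(*pgdp, pti_set_user_pgtbl(pgdp, pgd));
 *	}
 */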
 725#endif	/* __ASSEMBLY__ */
 726
 727
 728#ifdef CONFIG_X86_32
 729# include <asm/pgtable_32.h>
 730#else
 731# include <asm/pgtable_64.h>
 732#endif
 733
 734#ifndef __ASSEMBLY__
 735#include <linux/mm_types.h>
 736#include <linux/mmdebug.h>
 737#include <linux/log2.h>
 738#include <asm/fixmap.h>
 739
 740static inline int pte_none(pte_t pte)
 741{
 742	return !(pte.pte & ~(_PAGE_KNL_ERRATUM_MASK));
 743}
 744
 745#define __HAVE_ARCH_PTE_SAME
 746static inline int pte_same(pte_t a, pte_t b)
 747{
 748	return a.pte == b.pte;
 749}
 750
 751static inline int pte_present(pte_t a)
 752{
 753	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
 754}
 755
 756#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
 757static inline int pte_devmap(pte_t a)
 758{
 759	return (pte_flags(a) & _PAGE_DEVMAP) == _PAGE_DEVMAP;
 760}
 761#endif
 762
 763#define pte_accessible pte_accessible
 764static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
 765{
 766	if (pte_flags(a) & _PAGE_PRESENT)
 767		return true;
 768
 769	if ((pte_flags(a) & _PAGE_PROTNONE) &&
 770			atomic_read(&mm->tlb_flush_pending))
 771		return true;
 772
 773	return false;
 774}
 775
 776static inline int pmd_present(pmd_t pmd)
 777{
 778	/*
 779	 * Checking for _PAGE_PSE is needed too because
 780	 * split_huge_page will temporarily clear the present bit (but
 781	 * the _PAGE_PSE flag will remain set at all times while the
 782	 * _PAGE_PRESENT bit is clear).
 783	 */
 784	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
 785}
 786
 787#ifdef CONFIG_NUMA_BALANCING
 788/*
 789 * These work without NUMA balancing but the kernel does not care. See the
 790 * comment in include/linux/pgtable.h
 791 */
 792static inline int pte_protnone(pte_t pte)
 793{
 794	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
 795		== _PAGE_PROTNONE;
 796}
 797
 798static inline int pmd_protnone(pmd_t pmd)
 799{
 800	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
 801		== _PAGE_PROTNONE;
 802}
 803#endif /* CONFIG_NUMA_BALANCING */
 804
 805static inline int pmd_none(pmd_t pmd)
 806{
 807	/* Only check low word on 32-bit platforms, since it might be
 808	   out of sync with upper half. */
 809	unsigned long val = native_pmd_val(pmd);
 810	return (val & ~_PAGE_KNL_ERRATUM_MASK) == 0;
 811}
 812
 813static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 814{
 815	return (unsigned long)__va(pmd_val(pmd) & pmd_pfn_mask(pmd));
 816}
 817
 818/*
 819 * Currently stuck as a macro due to indirect forward reference to
 820 * linux/mmzone.h's __section_mem_map_addr() definition:
 821 */
 822#define pmd_page(pmd)	pfn_to_page(pmd_pfn(pmd))
 823
 824/*
 825 * Conversion functions: convert a page and protection to a page entry,
 826 * and a page entry and page directory to the page they refer to.
 827 *
 828 * (Currently stuck as a macro because of indirect forward reference
 829 * to linux/mm.h:page_to_nid())
 830 */
 831#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
 832
 833static inline int pmd_bad(pmd_t pmd)
 834{
 835	return (pmd_flags(pmd) & ~(_PAGE_USER | _PAGE_ACCESSED)) !=
 836	       (_KERNPG_TABLE & ~_PAGE_ACCESSED);
 837}
 838
 839static inline unsigned long pages_to_mb(unsigned long npg)
 840{
 841	return npg >> (20 - PAGE_SHIFT);
 842}
 843
 844#if CONFIG_PGTABLE_LEVELS > 2
 845static inline int pud_none(pud_t pud)
 846{
 847	return (native_pud_val(pud) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
 848}
 849
 850static inline int pud_present(pud_t pud)
 851{
 852	return pud_flags(pud) & _PAGE_PRESENT;
 853}
 854
 855static inline pmd_t *pud_pgtable(pud_t pud)
 856{
 857	return (pmd_t *)__va(pud_val(pud) & pud_pfn_mask(pud));
 858}
 859
 860/*
 861 * Currently stuck as a macro due to indirect forward reference to
 862 * linux/mmzone.h's __section_mem_map_addr() definition:
 863 */
 864#define pud_page(pud)	pfn_to_page(pud_pfn(pud))
 865
 866#define pud_leaf	pud_large
 867static inline int pud_large(pud_t pud)
 868{
 869	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
 870		(_PAGE_PSE | _PAGE_PRESENT);
 871}
 872
 873static inline int pud_bad(pud_t pud)
 874{
 875	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
 876}
 877#else
 878#define pud_leaf	pud_large
 879static inline int pud_large(pud_t pud)
 880{
 881	return 0;
 882}
 883#endif	/* CONFIG_PGTABLE_LEVELS > 2 */
 884
 885#if CONFIG_PGTABLE_LEVELS > 3
 886static inline int p4d_none(p4d_t p4d)
 887{
 888	return (native_p4d_val(p4d) & ~(_PAGE_KNL_ERRATUM_MASK)) == 0;
 889}
 890
 891static inline int p4d_present(p4d_t p4d)
 892{
 893	return p4d_flags(p4d) & _PAGE_PRESENT;
 894}
 895
 896static inline pud_t *p4d_pgtable(p4d_t p4d)
 897{
 898	return (pud_t *)__va(p4d_val(p4d) & p4d_pfn_mask(p4d));
 899}
 900
 901/*
 902 * Currently stuck as a macro due to indirect forward reference to
 903 * linux/mmzone.h's __section_mem_map_addr() definition:
 904 */
 905#define p4d_page(p4d)	pfn_to_page(p4d_pfn(p4d))
 906
 907static inline int p4d_bad(p4d_t p4d)
 908{
 909	unsigned long ignore_flags = _KERNPG_TABLE | _PAGE_USER;
 910
 911	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
 912		ignore_flags |= _PAGE_NX;
 913
 914	return (p4d_flags(p4d) & ~ignore_flags) != 0;
 915}
 916#endif  /* CONFIG_PGTABLE_LEVELS > 3 */
 917
 918static inline unsigned long p4d_index(unsigned long address)
 919{
 920	return (address >> P4D_SHIFT) & (PTRS_PER_P4D - 1);
 921}
 922
 923#if CONFIG_PGTABLE_LEVELS > 4
 924static inline int pgd_present(pgd_t pgd)
 925{
 926	if (!pgtable_l5_enabled())
 927		return 1;
 928	return pgd_flags(pgd) & _PAGE_PRESENT;
 929}
 930
 931static inline unsigned long pgd_page_vaddr(pgd_t pgd)
 932{
 933	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
 934}
 935
 936/*
 937 * Currently stuck as a macro due to indirect forward reference to
 938 * linux/mmzone.h's __section_mem_map_addr() definition:
 939 */
 940#define pgd_page(pgd)	pfn_to_page(pgd_pfn(pgd))
 941
 942/* to find an entry in a page-table-directory. */
 943static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
 944{
 945	if (!pgtable_l5_enabled())
 946		return (p4d_t *)pgd;
 947	return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address);
 948}
 949
 950static inline int pgd_bad(pgd_t pgd)
 951{
 952	unsigned long ignore_flags = _PAGE_USER;
 953
 954	if (!pgtable_l5_enabled())
 955		return 0;
 956
 957	if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION))
 958		ignore_flags |= _PAGE_NX;
 959
 960	return (pgd_flags(pgd) & ~ignore_flags) != _KERNPG_TABLE;
 961}
 962
 963static inline int pgd_none(pgd_t pgd)
 964{
 965	if (!pgtable_l5_enabled())
 966		return 0;
 967	/*
 968	 * There is no need to do a workaround for the KNL stray
 969	 * A/D bit erratum here.  PGDs only point to page tables
 970	 * except on 32-bit non-PAE which is not supported on
 971	 * KNL.
 972	 */
 973	return !native_pgd_val(pgd);
 974}
 975#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
 976
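/*
 * Editorial sketch: with five-level paging the extra p4d step folds away at
 * run time when pgtable_l5_enabled() is false, because p4d_offset() above
 * just returns the pgd pointer.  A walk therefore always spells out all five
 * levels.  pgd_offset_k(), pud_offset(), pmd_offset() and pte_offset_kernel()
 * come from the generic pgtable code; kernel_virt_to_pte() is hypothetical,
 * takes no locks and gives up on huge mappings.
 */
static pte_t *kernel_virt_to_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_large(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_large(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}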
 977#endif	/* __ASSEMBLY__ */
 978
 979#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
 980#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
 981
 982#ifndef __ASSEMBLY__
 983
 984extern int direct_gbpages;
 985void init_mem_mapping(void);
 986void early_alloc_pgt_buf(void);
 987extern void memblock_find_dma_reserve(void);
 988void __init poking_init(void);
 989unsigned long init_memory_mapping(unsigned long start,
 990				  unsigned long end, pgprot_t prot);
 991
 992#ifdef CONFIG_X86_64
 993extern pgd_t trampoline_pgd_entry;
 994#endif
 995
 996/* local pte updates need not use xchg for locking */
 997static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
 998{
 999	pte_t res = *ptep;
1000
1001	/* Pure native function needs no input for mm, addr */
1002	native_pte_clear(NULL, 0, ptep);
1003	return res;
1004}
1005
1006static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
1007{
1008	pmd_t res = *pmdp;
1009
1010	native_pmd_clear(pmdp);
1011	return res;
1012}
1013
1014static inline pud_t native_local_pudp_get_and_clear(pud_t *pudp)
1015{
1016	pud_t res = *pudp;
1017
1018	native_pud_clear(pudp);
1019	return res;
1020}
1021
1022static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
1023			      pte_t *ptep, pte_t pte)
1024{
1025	page_table_check_pte_set(mm, addr, ptep, pte);
1026	set_pte(ptep, pte);
1027}
1028
1029static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1030			      pmd_t *pmdp, pmd_t pmd)
1031{
1032	page_table_check_pmd_set(mm, addr, pmdp, pmd);
1033	set_pmd(pmdp, pmd);
1034}
1035
1036static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
1037			      pud_t *pudp, pud_t pud)
1038{
1039	page_table_check_pud_set(mm, addr, pudp, pud);
1040	native_set_pud(pudp, pud);
1041}
1042
1043/*
1044 * We only update the dirty/accessed state if we set
1045 * the dirty bit by hand in the kernel, since the hardware
1046 * will do the accessed bit for us, and we don't want to
1047 * race with other CPU's that might be updating the dirty
1048 * bit at the same time.
1049 */
1050struct vm_area_struct;
1051
1052#define  __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
1053extern int ptep_set_access_flags(struct vm_area_struct *vma,
1054				 unsigned long address, pte_t *ptep,
1055				 pte_t entry, int dirty);
1056
1057#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1058extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
1059				     unsigned long addr, pte_t *ptep);
1060
1061#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
1062extern int ptep_clear_flush_young(struct vm_area_struct *vma,
1063				  unsigned long address, pte_t *ptep);
1064
1065#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
1066static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
1067				       pte_t *ptep)
1068{
1069	pte_t pte = native_ptep_get_and_clear(ptep);
1070	page_table_check_pte_clear(mm, addr, pte);
1071	return pte;
1072}
1073
1074#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
1075static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
1076					    unsigned long addr, pte_t *ptep,
1077					    int full)
1078{
1079	pte_t pte;
1080	if (full) {
1081		/*
1082		 * Full address destruction in progress; paravirt does not
1083		 * care about updates and native needs no locking
1084		 */
1085		pte = native_local_ptep_get_and_clear(ptep);
1086		page_table_check_pte_clear(mm, addr, pte);
1087	} else {
1088		pte = ptep_get_and_clear(mm, addr, ptep);
1089	}
1090	return pte;
1091}
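/*
 * Note on "full": callers pass full != 0 only while the whole address
 * space is being torn down (e.g. at exit), when no other user of the mm
 * can race with us any more; that is what makes the cheaper, non-atomic
 * native_local_ptep_get_and_clear() safe in that branch.
 */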
1092
1093#define __HAVE_ARCH_PTEP_SET_WRPROTECT
1094static inline void ptep_set_wrprotect(struct mm_struct *mm,
1095				      unsigned long addr, pte_t *ptep)
1096{
1097	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
1098}
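/*
 * clear_bit() is atomic, so clearing RW here cannot lose an
 * Accessed/Dirty update that the hardware may be making to the same PTE
 * from another CPU at the same time.
 */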
1099
1100#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)
1101
1102#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))
1103
1104#define  __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1105extern int pmdp_set_access_flags(struct vm_area_struct *vma,
1106				 unsigned long address, pmd_t *pmdp,
1107				 pmd_t entry, int dirty);
1108extern int pudp_set_access_flags(struct vm_area_struct *vma,
1109				 unsigned long address, pud_t *pudp,
1110				 pud_t entry, int dirty);
1111
1112#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1113extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1114				     unsigned long addr, pmd_t *pmdp);
1115extern int pudp_test_and_clear_young(struct vm_area_struct *vma,
1116				     unsigned long addr, pud_t *pudp);
1117
1118#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1119extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
1120				  unsigned long address, pmd_t *pmdp);
1121
1122
1123#define pmd_write pmd_write
1124static inline int pmd_write(pmd_t pmd)
1125{
1126	return pmd_flags(pmd) & _PAGE_RW;
1127}
1128
1129#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1130static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, unsigned long addr,
1131				       pmd_t *pmdp)
1132{
1133	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
1134
1135	page_table_check_pmd_clear(mm, addr, pmd);
1136
1137	return pmd;
1138}
1139
1140#define __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR
1141static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
1142					unsigned long addr, pud_t *pudp)
1143{
1144	pud_t pud = native_pudp_get_and_clear(pudp);
1145
1146	page_table_check_pud_clear(mm, addr, pud);
1147
1148	return pud;
1149}
1150
1151#define __HAVE_ARCH_PMDP_SET_WRPROTECT
1152static inline void pmdp_set_wrprotect(struct mm_struct *mm,
1153				      unsigned long addr, pmd_t *pmdp)
1154{
1155	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
1156}
1157
1158#define pud_write pud_write
1159static inline int pud_write(pud_t pud)
1160{
1161	return pud_flags(pud) & _PAGE_RW;
1162}
1163
1164#ifndef pmdp_establish
1165#define pmdp_establish pmdp_establish
1166static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
1167		unsigned long address, pmd_t *pmdp, pmd_t pmd)
1168{
1169	page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
1170	if (IS_ENABLED(CONFIG_SMP)) {
1171		return xchg(pmdp, pmd);
1172	} else {
1173		pmd_t old = *pmdp;
1174		WRITE_ONCE(*pmdp, pmd);
1175		return old;
1176	}
1177}
1178#endif
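/*
 * Why the SMP/UP split above: on SMP another CPU's page walker may be
 * setting Accessed/Dirty in *pmdp concurrently, so the replacement must
 * be an atomic xchg() to return a consistent old value; on UP a plain
 * WRITE_ONCE() store suffices.  Hypothetical caller sketch (newpmd is a
 * value built by the caller):
 *
 *	pmd_t old = pmdp_establish(vma, address, pmdp, newpmd);
 */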
1179
1180#define __HAVE_ARCH_PMDP_INVALIDATE_AD
1181extern pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma,
1182				unsigned long address, pmd_t *pmdp);
1183
1184/*
1185 * Page table pages are page-aligned.  The lower half of the top
1186 * level is used for userspace and the top half for the kernel.
1187 *
1188 * Returns true for parts of the PGD that map userspace and
1189 * false for the parts that map the kernel.
1190 */
1191static inline bool pgdp_maps_userspace(void *__ptr)
1192{
1193	unsigned long ptr = (unsigned long)__ptr;
1194
1195	return (((ptr & ~PAGE_MASK) / sizeof(pgd_t)) < PGD_KERNEL_START);
1196}
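/*
 * Worked example (assuming 4K pages, 8-byte PGD entries and
 * PGD_KERNEL_START at the middle of the page, i.e. slot 256): pointers
 * whose offset within the PGD page is below 256 * 8 = 2048 bytes refer
 * to user-space slots; anything at or above that maps the kernel.
 */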
1197
1198#define pgd_leaf	pgd_large
1199static inline int pgd_large(pgd_t pgd) { return 0; }
1200
1201#ifdef CONFIG_PAGE_TABLE_ISOLATION
1202/*
1203 * All top-level PAGE_TABLE_ISOLATION page tables are order-1 pages
1204 * (8k-aligned and 8k in size).  The kernel one occupies the first 4k and
1205 * the user one the last 4k.  To switch between them, you
1206 * just need to flip the 12th bit in their addresses.
1207 */
1208#define PTI_PGTABLE_SWITCH_BIT	PAGE_SHIFT
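/*
 * Concretely: if the 8k-aligned kernel PGD page is at address X, the
 * user copy is at X + PAGE_SIZE, so the helpers below only have to set
 * or clear bit PAGE_SHIFT (bit 12) of the pointer.
 */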
1209
1210/*
1211 * This generates better code than the inline assembly in
1212 * __set_bit().
1213 */
1214static inline void *ptr_set_bit(void *ptr, int bit)
1215{
1216	unsigned long __ptr = (unsigned long)ptr;
1217
1218	__ptr |= BIT(bit);
1219	return (void *)__ptr;
1220}
1221static inline void *ptr_clear_bit(void *ptr, int bit)
1222{
1223	unsigned long __ptr = (unsigned long)ptr;
1224
1225	__ptr &= ~BIT(bit);
1226	return (void *)__ptr;
1227}
1228
1229static inline pgd_t *kernel_to_user_pgdp(pgd_t *pgdp)
1230{
1231	return ptr_set_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1232}
1233
1234static inline pgd_t *user_to_kernel_pgdp(pgd_t *pgdp)
1235{
1236	return ptr_clear_bit(pgdp, PTI_PGTABLE_SWITCH_BIT);
1237}
1238
1239static inline p4d_t *kernel_to_user_p4dp(p4d_t *p4dp)
1240{
1241	return ptr_set_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1242}
1243
1244static inline p4d_t *user_to_kernel_p4dp(p4d_t *p4dp)
1245{
1246	return ptr_clear_bit(p4dp, PTI_PGTABLE_SWITCH_BIT);
1247}
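/*
 * Usage sketch: given a slot in the kernel copy of the PGD, the matching
 * slot in the user copy is kernel_to_user_pgdp(pgdp), and vice versa via
 * user_to_kernel_pgdp(); PTI code uses these when an entry has to be
 * visible in both page tables.
 */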
1248#endif /* CONFIG_PAGE_TABLE_ISOLATION */
1249
1250/*
1251 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
1252 *
1253 *  dst - pointer to pgd range anywhere on a pgd page
1254 *  src - pointer to the source pgd range
1255 *  count - the number of pgds to copy.
1256 *
1257 * dst and src can be on the same page, but the ranges must not overlap,
1258 * and must not cross a page boundary.
1259 */
1260static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
1261{
1262	memcpy(dst, src, count * sizeof(pgd_t));
1263#ifdef CONFIG_PAGE_TABLE_ISOLATION
1264	if (!static_cpu_has(X86_FEATURE_PTI))
1265		return;
1266	/* Clone the user space pgd as well */
1267	memcpy(kernel_to_user_pgdp(dst), kernel_to_user_pgdp(src),
1268	       count * sizeof(pgd_t));
1269#endif
1270}
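/*
 * Sketch of the typical use (new_pgd is a hypothetical freshly allocated
 * PGD page; swapper_pg_dir is the kernel's reference page table):
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 *
 * i.e. copy the shared kernel half into the new top-level table.
 */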
1271
1272#define PTE_SHIFT ilog2(PTRS_PER_PTE)
1273static inline int page_level_shift(enum pg_level level)
1274{
1275	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
1276}
1277static inline unsigned long page_level_size(enum pg_level level)
1278{
1279	return 1UL << page_level_shift(level);
1280}
1281static inline unsigned long page_level_mask(enum pg_level level)
1282{
1283	return ~(page_level_size(level) - 1);
1284}
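/*
 * Worked example (assuming 4K pages and 512 entries per table, so
 * PTE_SHIFT == 9, and PG_LEVEL_2M == 2): page_level_shift(PG_LEVEL_2M)
 * is (12 - 9) + 2 * 9 = 21, page_level_size() is 2 MiB and
 * page_level_mask() is ~0x1fffffUL.
 */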
1285
1286/*
1287 * The x86 doesn't have any external MMU info: the kernel page
1288 * tables contain all the necessary information.
1289 */
1290static inline void update_mmu_cache(struct vm_area_struct *vma,
1291		unsigned long addr, pte_t *ptep)
1292{
1293}
1294static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
1295		unsigned long addr, pmd_t *pmd)
1296{
1297}
1298static inline void update_mmu_cache_pud(struct vm_area_struct *vma,
1299		unsigned long addr, pud_t *pud)
1300{
1301}
1302#ifdef _PAGE_SWP_EXCLUSIVE
1303#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
1304static inline pte_t pte_swp_mkexclusive(pte_t pte)
1305{
1306	return pte_set_flags(pte, _PAGE_SWP_EXCLUSIVE);
1307}
1308
1309static inline int pte_swp_exclusive(pte_t pte)
1310{
1311	return pte_flags(pte) & _PAGE_SWP_EXCLUSIVE;
1312}
1313
1314static inline pte_t pte_swp_clear_exclusive(pte_t pte)
1315{
1316	return pte_clear_flags(pte, _PAGE_SWP_EXCLUSIVE);
1317}
1318#endif /* _PAGE_SWP_EXCLUSIVE */
1319
1320#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
1321static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
1322{
1323	return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1324}
1325
1326static inline int pte_swp_soft_dirty(pte_t pte)
1327{
1328	return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
1329}
1330
1331static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
1332{
1333	return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
1334}
1335
1336#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1337static inline pmd_t pmd_swp_mksoft_dirty(pmd_t pmd)
1338{
1339	return pmd_set_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1340}
1341
1342static inline int pmd_swp_soft_dirty(pmd_t pmd)
1343{
1344	return pmd_flags(pmd) & _PAGE_SWP_SOFT_DIRTY;
1345}
1346
1347static inline pmd_t pmd_swp_clear_soft_dirty(pmd_t pmd)
1348{
1349	return pmd_clear_flags(pmd, _PAGE_SWP_SOFT_DIRTY);
1350}
1351#endif
1352#endif
1353
1354#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
1355static inline pte_t pte_swp_mkuffd_wp(pte_t pte)
1356{
1357	return pte_set_flags(pte, _PAGE_SWP_UFFD_WP);
1358}
1359
1360static inline int pte_swp_uffd_wp(pte_t pte)
1361{
1362	return pte_flags(pte) & _PAGE_SWP_UFFD_WP;
1363}
1364
1365static inline pte_t pte_swp_clear_uffd_wp(pte_t pte)
1366{
1367	return pte_clear_flags(pte, _PAGE_SWP_UFFD_WP);
1368}
1369
1370static inline pmd_t pmd_swp_mkuffd_wp(pmd_t pmd)
1371{
1372	return pmd_set_flags(pmd, _PAGE_SWP_UFFD_WP);
1373}
1374
1375static inline int pmd_swp_uffd_wp(pmd_t pmd)
1376{
1377	return pmd_flags(pmd) & _PAGE_SWP_UFFD_WP;
1378}
1379
1380static inline pmd_t pmd_swp_clear_uffd_wp(pmd_t pmd)
1381{
1382	return pmd_clear_flags(pmd, _PAGE_SWP_UFFD_WP);
1383}
1384#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */
1385
1386static inline u16 pte_flags_pkey(unsigned long pte_flags)
1387{
1388#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
1389	/* ifdef to avoid doing 59-bit shift on 32-bit values */
1390	return (pte_flags & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
1391#else
1392	return 0;
1393#endif
1394}
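/*
 * Example (assuming the protection key occupies PTE flag bits 59-62, as
 * _PAGE_BIT_PKEY_BIT0 above implies): a mapping created with pkey 5
 * carries 5UL << 59 in its flags, and this helper recovers the value 5.
 */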
1395
1396static inline bool __pkru_allows_pkey(u16 pkey, bool write)
1397{
1398	u32 pkru = read_pkru();
1399
1400	if (!__pkru_allows_read(pkru, pkey))
1401		return false;
1402	if (write && !__pkru_allows_write(pkru, pkey))
1403		return false;
1404
1405	return true;
1406}
1407
1408/*
1409 * 'pteval' can come from a PTE, PMD or PUD.  We only check
1410 * _PAGE_PRESENT, _PAGE_USER, and _PAGE_RW in here which are the
1411 * same value on all 3 types.
1412 */
1413static inline bool __pte_access_permitted(unsigned long pteval, bool write)
1414{
1415	unsigned long need_pte_bits = _PAGE_PRESENT|_PAGE_USER;
1416
1417	if (write)
1418		need_pte_bits |= _PAGE_RW;
1419
1420	if ((pteval & need_pte_bits) != need_pte_bits)
1421		return 0;
1422
1423	return __pkru_allows_pkey(pte_flags_pkey(pteval), write);
1424}
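/*
 * Example of what the wrappers below end up checking: a write access is
 * permitted only if _PAGE_PRESENT, _PAGE_USER and _PAGE_RW are all set
 * and the current PKRU value allows both read and write for the entry's
 * protection key; a read-only access skips the _PAGE_RW and write-PKRU
 * checks.
 */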
1425
1426#define pte_access_permitted pte_access_permitted
1427static inline bool pte_access_permitted(pte_t pte, bool write)
1428{
1429	return __pte_access_permitted(pte_val(pte), write);
1430}
1431
1432#define pmd_access_permitted pmd_access_permitted
1433static inline bool pmd_access_permitted(pmd_t pmd, bool write)
1434{
1435	return __pte_access_permitted(pmd_val(pmd), write);
1436}
1437
1438#define pud_access_permitted pud_access_permitted
1439static inline bool pud_access_permitted(pud_t pud, bool write)
1440{
1441	return __pte_access_permitted(pud_val(pud), write);
1442}
1443
1444#define __HAVE_ARCH_PFN_MODIFY_ALLOWED 1
1445extern bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot);
1446
1447static inline bool arch_has_pfn_modify_check(void)
1448{
1449	return boot_cpu_has_bug(X86_BUG_L1TF);
1450}
1451
1452#define arch_has_hw_pte_young arch_has_hw_pte_young
1453static inline bool arch_has_hw_pte_young(void)
1454{
1455	return true;
1456}
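/*
 * On x86 the CPU sets the Accessed bit in leaf entries itself, so
 * generic mm code can rely on it instead of having to simulate "young"
 * with minor faults; hence the unconditional true above.
 */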
1457
1458#ifdef CONFIG_XEN_PV
1459#define arch_has_hw_nonleaf_pmd_young arch_has_hw_nonleaf_pmd_young
1460static inline bool arch_has_hw_nonleaf_pmd_young(void)
1461{
1462	return !cpu_feature_enabled(X86_FEATURE_XENPV);
1463}
1464#endif
1465
1466#ifdef CONFIG_PAGE_TABLE_CHECK
1467static inline bool pte_user_accessible_page(pte_t pte)
1468{
1469	return (pte_val(pte) & _PAGE_PRESENT) && (pte_val(pte) & _PAGE_USER);
1470}
1471
1472static inline bool pmd_user_accessible_page(pmd_t pmd)
1473{
1474	return pmd_leaf(pmd) && (pmd_val(pmd) & _PAGE_PRESENT) && (pmd_val(pmd) & _PAGE_USER);
1475}
1476
1477static inline bool pud_user_accessible_page(pud_t pud)
1478{
1479	return pud_leaf(pud) && (pud_val(pud) & _PAGE_PRESENT) && (pud_val(pud) & _PAGE_USER);
1480}
1481#endif
1482
1483#endif	/* __ASSEMBLY__ */
1484
1485#endif /* _ASM_X86_PGTABLE_H */