/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PGTABLE_RADIX_H
#define _ASM_POWERPC_PGTABLE_RADIX_H

#include <asm/asm-const.h>

#ifndef __ASSEMBLY__
#include <asm/cmpxchg.h>
#endif

#ifdef CONFIG_PPC_64K_PAGES
#include <asm/book3s/64/radix-64k.h>
#else
#include <asm/book3s/64/radix-4k.h>
#endif

#ifndef __ASSEMBLY__
#include <asm/book3s/64/tlbflush-radix.h>
#include <asm/cpu_has_feature.h>
#endif

/* An empty PTE can still have an R or C writeback */
#define RADIX_PTE_NONE_MASK	(_PAGE_DIRTY | _PAGE_ACCESSED)

/*
 * Bits to set in a radix PMD/PUD/PGD entry: the valid bit, plus the
 * index size of the next-level table.
 */
#define RADIX_PMD_VAL_BITS	(0x8000000000000000UL | RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_VAL_BITS	(0x8000000000000000UL | RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_VAL_BITS	(0x8000000000000000UL | RADIX_PUD_INDEX_SIZE)

/* A valid table entry must have nothing set in the reserved bits or leaf bits */
#define RADIX_PMD_BAD_BITS	0x60000000000000e0UL
#define RADIX_PUD_BAD_BITS	0x60000000000000e0UL
#define RADIX_P4D_BAD_BITS	0x60000000000000e0UL

#define RADIX_PMD_SHIFT		(PAGE_SHIFT + RADIX_PTE_INDEX_SIZE)
#define RADIX_PUD_SHIFT		(RADIX_PMD_SHIFT + RADIX_PMD_INDEX_SIZE)
#define RADIX_PGD_SHIFT		(RADIX_PUD_SHIFT + RADIX_PUD_INDEX_SIZE)

#define R_PTRS_PER_PTE		(1 << RADIX_PTE_INDEX_SIZE)
#define R_PTRS_PER_PMD		(1 << RADIX_PMD_INDEX_SIZE)
#define R_PTRS_PER_PUD		(1 << RADIX_PUD_INDEX_SIZE)

/*
 * Size of EA range mapped by our pagetables.
 */
#define RADIX_PGTABLE_EADDR_SIZE	(RADIX_PTE_INDEX_SIZE + RADIX_PMD_INDEX_SIZE + \
					 RADIX_PUD_INDEX_SIZE + RADIX_PGD_INDEX_SIZE + PAGE_SHIFT)
#define RADIX_PGTABLE_RANGE		(ASM_CONST(1) << RADIX_PGTABLE_EADDR_SIZE)

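/*
 * Worked example (index sizes from radix-4k.h and radix-64k.h): both page
 * size configurations give a 52 bit effective address range:
 *
 *	4K:  12 (PAGE_SHIFT) + 9 (PTE) + 9 (PMD) + 9 (PUD) + 13 (PGD) = 52
 *	64K: 16 (PAGE_SHIFT) + 5 (PTE) + 9 (PMD) + 9 (PUD) + 13 (PGD) = 52
 *
 * so RADIX_PGTABLE_RANGE is 1UL << 52 either way.
 */
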
/*
 * We support a 52 bit address space. The top bit of the 52 bit range is
 * used for the kernel virtual mapping, and the kernel must fit within
 * the top quadrant.
 *
 *           +------------------+
 *           +------------------+  Kernel virtual map (0xc008000000000000)
 *           |                  |
 *           |                  |
 *           |                  |
 * 0b11......+------------------+  Kernel linear map (0xc....)
 *           |                  |
 *           |     2 quadrant   |
 *           |                  |
 * 0b10......+------------------+
 *           |                  |
 *           |     1 quadrant   |
 *           |                  |
 * 0b01......+------------------+
 *           |                  |
 *           |     0 quadrant   |
 *           |                  |
 * 0b00......+------------------+
 *
 *
 * 3rd quadrant expanded:
 * +------------------------------+  Highest address (0xc010000000000000)
 * +------------------------------+  KASAN shadow end (0xc00fc00000000000)
 * |                              |
 * |                              |
 * +------------------------------+  Kernel vmemmap end/shadow start (0xc00e000000000000)
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel IO map end/vmemmap start
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel vmap end/IO map start
 * |                              |
 * |            512TB             |
 * |                              |
 * +------------------------------+  Kernel virt start (0xc008000000000000)
 * |                              |
 * |                              |
 * |                              |
 * +------------------------------+  Kernel linear (0xc.....)
 */

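/*
 * Address arithmetic behind the picture above (illustrative): quadrant 3
 * starts at 0xc000000000000000 (top two EA bits 0b11), and half of the
 * 52 bit range is 1UL << 51 = 0x0008000000000000. The linear map takes
 * the first half of the quadrant and the kernel virtual map the second,
 * hence RADIX_KERN_VIRT_START below:
 *
 *	0xc000000000000000 + (1UL << 51) = 0xc008000000000000
 */
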
/* For the sizes of the shadow area, see kasan.h */

/*
 * If we store section details in page->flags, we can't increase
 * MAX_PHYSMEM_BITS. If we increase SECTIONS_WIDTH instead, node details no
 * longer fit in page->flags and page_to_nid() must do a
 * page->section->node lookup. Hence only increase MAX_PHYSMEM_BITS for
 * VMEMMAP, where SPARSEMEM_EXTREME additionally keeps the memory
 * requirements down with a large number of sections.
 * 51 bits is the max physical real address on POWER9.
 */

#if defined(CONFIG_SPARSEMEM_VMEMMAP) && defined(CONFIG_SPARSEMEM_EXTREME)
#define R_MAX_PHYSMEM_BITS	51
#else
#define R_MAX_PHYSMEM_BITS	46
#endif

#define RADIX_KERN_VIRT_START	ASM_CONST(0xc008000000000000)
/*
 * 49 = MAX_EA_BITS_PER_CONTEXT (hash specific), chosen so that radix
 * uses the same region size as hash.
 */
#define RADIX_KERN_MAP_SIZE	(1UL << 49)

#define RADIX_VMALLOC_START	RADIX_KERN_VIRT_START
#define RADIX_VMALLOC_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_VMALLOC_END	(RADIX_VMALLOC_START + RADIX_VMALLOC_SIZE)

#define RADIX_KERN_IO_START	RADIX_VMALLOC_END
#define RADIX_KERN_IO_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_KERN_IO_END	(RADIX_KERN_IO_START + RADIX_KERN_IO_SIZE)

#define RADIX_VMEMMAP_START	RADIX_KERN_IO_END
#define RADIX_VMEMMAP_SIZE	RADIX_KERN_MAP_SIZE
#define RADIX_VMEMMAP_END	(RADIX_VMEMMAP_START + RADIX_VMEMMAP_SIZE)
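
/*
 * Concrete values implied by the definitions above (illustrative):
 * RADIX_KERN_MAP_SIZE = 1UL << 49 = 512TB, so
 *
 *	vmalloc: 0xc008000000000000 - 0xc00a000000000000
 *	IO map:  0xc00a000000000000 - 0xc00c000000000000
 *	vmemmap: 0xc00c000000000000 - 0xc00e000000000000
 *
 * which matches the "3rd quadrant expanded" picture above.
 */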

#ifndef __ASSEMBLY__
#define RADIX_PTE_TABLE_SIZE	(sizeof(pte_t) << RADIX_PTE_INDEX_SIZE)
#define RADIX_PMD_TABLE_SIZE	(sizeof(pmd_t) << RADIX_PMD_INDEX_SIZE)
#define RADIX_PUD_TABLE_SIZE	(sizeof(pud_t) << RADIX_PUD_INDEX_SIZE)
#define RADIX_PGD_TABLE_SIZE	(sizeof(pgd_t) << RADIX_PGD_INDEX_SIZE)
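
/*
 * Example sizes (illustrative, assuming 8-byte pte_t/pgd_t entries): with
 * 64K pages a PTE table is 8 << 5 = 256 bytes and the PGD is 8 << 13 = 64K;
 * with 4K pages a PTE table is 8 << 9 = 4K.
 */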

#ifdef CONFIG_STRICT_KERNEL_RWX
extern void radix__mark_rodata_ro(void);
extern void radix__mark_initmem_nx(void);
#endif

extern void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
					 pte_t entry, unsigned long address,
					 int psize);

extern void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
					   unsigned long addr, pte_t *ptep,
					   pte_t old_pte, pte_t pte);

static inline unsigned long __radix_pte_update(pte_t *ptep, unsigned long clr,
					       unsigned long set)
{
	__be64 old_be, tmp_be;

	__asm__ __volatile__(
	"1:	ldarx	%0,0,%3		# pte_update\n"
	"	andc	%1,%0,%5	\n"
	"	or	%1,%1,%4	\n"
	"	stdcx.	%1,0,%3		\n"
	"	bne-	1b"
	: "=&r" (old_be), "=&r" (tmp_be), "=m" (*ptep)
	: "r" (ptep), "r" (cpu_to_be64(set)), "r" (cpu_to_be64(clr))
	: "cc" );

	return be64_to_cpu(old_be);
}
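
/*
 * A plain-C sketch of what the ldarx/stdcx. loop above does (illustrative
 * only; cmpxchg_like() is a hypothetical compare-and-swap stand-in):
 *
 *	do {
 *		old = be64_to_cpu(*(__be64 *)ptep);
 *		new = (old & ~clr) | set;
 *	} while (!cmpxchg_like(ptep, old, new));
 *
 * The pte is stored big-endian in memory, hence the cpu_to_be64()
 * conversion of the operands and be64_to_cpu() on the returned value.
 */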

static inline unsigned long radix__pte_update(struct mm_struct *mm,
					      unsigned long addr,
					      pte_t *ptep, unsigned long clr,
					      unsigned long set,
					      int huge)
{
	unsigned long old_pte;

	old_pte = __radix_pte_update(ptep, clr, set);
	if (!huge)
		assert_pte_locked(mm, addr);

	return old_pte;
}
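
/*
 * Typical use (an illustrative sketch, not from this file): write-protect
 * a pte under the pte lock and keep the old value for dirty tracking:
 *
 *	old = radix__pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
 */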

static inline pte_t radix__ptep_get_and_clear_full(struct mm_struct *mm,
						   unsigned long addr,
						   pte_t *ptep, int full)
{
	unsigned long old_pte;

	if (full) {
		old_pte = pte_val(*ptep);
		*ptep = __pte(0);
	} else {
		old_pte = radix__pte_update(mm, addr, ptep, ~0ul, 0, 0);
	}

	return __pte(old_pte);
}
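
/*
 * Note (explanatory, not in the original): with full set, the caller is
 * tearing down the whole address space, so nothing can race with this pte
 * and a plain non-atomic clear suffices; otherwise the atomic
 * radix__pte_update() path preserves concurrent R/C updates by hardware.
 */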

static inline int radix__pte_same(pte_t pte_a, pte_t pte_b)
{
	return ((pte_raw(pte_a) ^ pte_raw(pte_b)) == 0);
}

static inline int radix__pte_none(pte_t pte)
{
	return (pte_val(pte) & ~RADIX_PTE_NONE_MASK) == 0;
}

static inline void radix__set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte, int percpu)
{
	*ptep = pte;

	/*
	 * The architecture suggests a ptesync after setting the pte, which
	 * orders the store that updates the pte with subsequent page table
	 * walk accesses which may load the pte. Without this it may be
	 * possible for a subsequent access to result in a spurious fault.
	 *
	 * This is not necessary for correctness, because a spurious fault
	 * is tolerated by the page fault handler, and this store will
	 * eventually be seen. In testing, there was no noticeable increase
	 * in user faults on POWER9. Avoiding ptesync here is a significant
	 * win for things like fork. If a future microarchitecture benefits
	 * from ptesync, it should probably go into update_mmu_cache, rather
	 * than set_pte_at (which is used to set ptes unrelated to faults).
	 *
	 * Spurious faults from kernel memory are not tolerated, so there
	 * is a ptesync in flush_cache_vmap, and __map_kernel_page() follows
	 * the pte update sequence from ISA Book III 6.10 Translation Table
	 * Update Synchronization Requirements.
	 */
}

static inline int radix__pmd_bad(pmd_t pmd)
{
	return !!(pmd_val(pmd) & RADIX_PMD_BAD_BITS);
}

static inline int radix__pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
	return ((pmd_raw(pmd_a) ^ pmd_raw(pmd_b)) == 0);
}

static inline int radix__pud_bad(pud_t pud)
{
	return !!(pud_val(pud) & RADIX_PUD_BAD_BITS);
}

static inline int radix__pud_same(pud_t pud_a, pud_t pud_b)
{
	return ((pud_raw(pud_a) ^ pud_raw(pud_b)) == 0);
}

static inline int radix__p4d_bad(p4d_t p4d)
{
	return !!(p4d_val(p4d) & RADIX_P4D_BAD_BITS);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

static inline int radix__pmd_trans_huge(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

static inline pmd_t radix__pmd_mkhuge(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | _PAGE_PTE);
}

static inline int radix__pud_trans_huge(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PTE | _PAGE_DEVMAP)) == _PAGE_PTE;
}

static inline pud_t radix__pud_mkhuge(pud_t pud)
{
	return __pud(pud_val(pud) | _PAGE_PTE);
}

extern unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
						pmd_t *pmdp, unsigned long clr,
						unsigned long set);
extern unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long addr,
						pud_t *pudp, unsigned long clr,
						unsigned long set);
extern pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp);
extern void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
					      pgtable_t pgtable);
extern pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
extern pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long addr, pmd_t *pmdp);
pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm,
				     unsigned long addr, pud_t *pudp);

static inline int radix__has_transparent_hugepage(void)
{
	/* For radix, 2M at the PMD level means THP is supported */
	if (mmu_psize_defs[MMU_PAGE_2M].shift == PMD_SHIFT)
		return 1;
	return 0;
}
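
/*
 * Why this holds for both base page sizes (illustrative arithmetic):
 * PMD_SHIFT = PAGE_SHIFT + PTE index size, i.e. 12 + 9 with 4K pages and
 * 16 + 5 with 64K pages, both 21, so a PMD leaf maps 1 << 21 = 2M.
 */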

static inline int radix__has_transparent_pud_hugepage(void)
{
	/* For radix, 1G at the PUD level means PUD hugepage support */
	if (mmu_psize_defs[MMU_PAGE_1G].shift == PUD_SHIFT)
		return 1;
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pmd_t radix__pmd_mkdevmap(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | (_PAGE_PTE | _PAGE_DEVMAP));
}

static inline pud_t radix__pud_mkdevmap(pud_t pud)
{
	return __pud(pud_val(pud) | (_PAGE_PTE | _PAGE_DEVMAP));
}

struct vmem_altmap;
struct dev_pagemap;
extern int __meminit radix__vmemmap_create_mapping(unsigned long start,
						   unsigned long page_size,
						   unsigned long phys);
int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end,
				      int node, struct vmem_altmap *altmap);
void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
			       struct vmem_altmap *altmap);
extern void radix__vmemmap_remove_mapping(unsigned long start,
					  unsigned long page_size);

extern int radix__map_kernel_page(unsigned long ea, unsigned long pa,
				  pgprot_t flags, unsigned int psz);

static inline unsigned long radix__get_tree_size(void)
{
	unsigned long rts_field;
	/*
	 * We support 52 bits, hence RTS = 52 - 31 = 21 = 0b10101.
	 * RTS encoding details:
	 * bits 0 - 3 of rts -> bits 6 - 8 of the unsigned long
	 * bits 4 - 5 of rts -> bits 62 - 63 of the unsigned long
	 */
	rts_field = (0x5UL << 5); /* 6 - 8 bits */
	rts_field |= (0x2UL << 61);

	return rts_field;
}
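
/*
 * Worked value (illustrative): 0x5UL << 5 = 0xa0 and 0x2UL << 61 =
 * 0x4000000000000000, so radix__get_tree_size() returns
 * 0x40000000000000a0, i.e. the 52 bit RTS encoding already shifted into
 * its split field positions.
 */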

#ifdef CONFIG_MEMORY_HOTPLUG
int radix__create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot);
int radix__remove_section_mapping(unsigned long start, unsigned long end);
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
#define vmemmap_can_optimize vmemmap_can_optimize
bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap);
#endif

#define vmemmap_populate_compound_pages vmemmap_populate_compound_pages
int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
					      unsigned long start,
					      unsigned long end, int node,
					      struct dev_pagemap *pgmap);
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_RADIX_H */