// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains pgtable-related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif
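
/*
 * Sketch (editorial, not part of the original file): the geometry
 * variables above are filled in during early MMU setup, before the first
 * page table walk, with hash- or radix-specific values. For example, the
 * radix path does, in essence:
 *
 *	__pte_index_size = RADIX_PTE_INDEX_SIZE;
 *	__pmd_index_size = RADIX_PMD_INDEX_SIZE;
 *	__pud_index_size = RADIX_PUD_INDEX_SIZE;
 *	__pgd_index_size = RADIX_PGD_INDEX_SIZE;
 *
 * See radix__early_init_mmu() and hash__early_init_mmu() for the real
 * assignments; the lines above are abridged and illustrative only.
 */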

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *p4d_page(p4d_t p4d)
{
	if (p4d_is_leaf(p4d)) {
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!p4d_huge(p4d));
		return pte_page(p4d_pte(p4d));
	}
	return virt_to_page(p4d_pgtable(p4d));
}
#endif

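/*
 * Like p4d_page() above: return the struct page backing a PUD entry,
 * whether the PUD maps a huge page directly (leaf) or points to a PMD
 * table page.
 */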
struct page *pud_page(pud_t pud)
{
	if (pud_is_leaf(pud)) {
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!pud_huge(pud));
		return pte_page(pud_pte(pud));
	}
	return virt_to_page(pud_pgtable(pud));
}

/*
 * For a hugepage we have the pfn in the pmd and use PTE_RPN_SHIFT bits
 * for flags. For a PTE page, we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_is_leaf(pmd)) {
		/*
		 * vmalloc_to_page may be called on any vmap address (not
		 * only vmalloc), and it uses pmd_page() etc. when huge vmap
		 * is enabled, so these checks can't be used.
		 */
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
		return pte_page(pmd_pte(pmd));
	}
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
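/*
 * Make the kernel's read-only data actually read-only in the page
 * tables, dispatching to the radix or hash implementation as
 * appropriate.
 */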
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif

/*
 * This file contains ioremap and related functions for 64-bit machines.
 *
 * Derived from arch/ppc64/mm/init.c
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/abs_addr.h>
#include <asm/firmware.h>

#include "mmu_decl.h"

unsigned long ioremap_bot = IOREMAP_BASE;

#ifdef CONFIG_PPC_MMU_NOHASH
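/*
 * Allocate a zeroed, naturally aligned page table during early boot:
 * from bootmem once it is up, or straight from memblock before that,
 * keeping the allocation below MAX_DMA_ADDRESS.
 */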
static void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	if (init_bootmem_done)
		pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
	else
		pt = __va(memblock_alloc_base(size, size,
					      __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
#endif /* CONFIG_PPC_MMU_NOHASH */

/*
 * map_kernel_page is currently only called by __ioremap. It adds an
 * entry to the ioremap page table and adds an entry to the HPT,
 * possibly bolting it.
 */
int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	if (slab_is_available()) {
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
	} else {
#ifdef CONFIG_PPC_MMU_NOHASH
		/*
		 * Warning! This will blow up if bootmem is not initialized,
		 * which our ppc64 code is keen to do. We'll need to fix it
		 * and/or be more careful.
		 */
		pgdp = pgd_offset_k(ea);
#ifdef PUD_TABLE_SIZE
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* PUD_TABLE_SIZE */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
						       __pgprot(flags)));
#else /* CONFIG_PPC_MMU_NOHASH */
		/*
		 * If the mm subsystem is not fully up, we cannot create a
		 * linux page table entry for this mapping.  Simply bolt an
		 * entry in the hardware page table.
		 */
		if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
				      mmu_io_psize, mmu_kernel_ssize)) {
			printk(KERN_ERR "Failed to create bolted mapping for"
			       " IO memory at 0x%016lx\n", pa);
			return -ENOMEM;
		}
#endif /* !CONFIG_PPC_MMU_NOHASH */
	}
	return 0;
}
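
/*
 * Illustrative early-boot use (editorial sketch; virt/phys are
 * hypothetical page-aligned addresses, not kernel symbols): bolt one
 * uncached, guarded page for a device before the MM is fully up:
 *
 *	map_kernel_page(virt, phys, _PAGE_NO_CACHE | _PAGE_GUARDED |
 *			pgprot_val(PAGE_KERNEL));
 *
 * __ioremap_at() below performs exactly this call in a loop over the
 * region being mapped.
 */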

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem *__ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			   unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* Non-cacheable page cannot be coherent */
	if (flags & _PAGE_NO_CACHE)
		flags &= ~_PAGE_COHERENT;

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & _PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea + i, pa + i, flags))
			return NULL;

	return (void __iomem *)ea;
}
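
/*
 * Sketch of a typical caller (editorial; phb_io_phys/virt/size are
 * illustrative names, not kernel symbols): PCI host bridge setup bolts a
 * fixed virtual window for PHB I/O space at a known address:
 *
 *	void __iomem *io;
 *
 *	io = __ioremap_at(phb_io_phys, (void *)phb_io_virt, phb_io_size,
 *			  _PAGE_NO_CACHE | _PAGE_GUARDED);
 *	if (io == NULL)
 *		return -ENOMEM;
 */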

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem *__ioremap_caller(phys_addr_t addr, unsigned long size,
			       unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot; the vmalloc allocator will then hand out
	 * addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (mem_init_done) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem *__ioremap(phys_addr_t addr, unsigned long size,
			unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem *ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE | _PAGE_GUARDED;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
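
/*
 * Typical driver usage (editorial sketch; MYDEV_CTRL and
 * MYDEV_CTRL_ENABLE are hypothetical register names, res is the driver's
 * struct resource):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(res->start, resource_size(res));
 *	if (!regs)
 *		return -ENOMEM;
 *	out_be32(regs + MYDEV_CTRL, MYDEV_CTRL_ENABLE);
 *	...
 *	iounmap(regs);
 */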

void __iomem *ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = _PAGE_NO_CACHE;
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem *ioremap_prot(phys_addr_t addr, unsigned long size,
			   unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_RW)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_USER and _PAGE_EXEC leak out */
	flags &= ~(_PAGE_USER | _PAGE_EXEC);

#ifdef _PAGE_BAP_SR
	/*
	 * _PAGE_USER contains _PAGE_BAP_SR on BookE using the new PTE
	 * format, which means that we just cleared supervisor access...
	 * oops ;-) This restores it.
	 */
	flags |= _PAGE_BAP_SR;
#endif

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}
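
/*
 * Example call (editorial sketch; the flag combination is illustrative,
 * not a recommendation): map a device region with caller-chosen
 * protection bits, relying on the fixups above to sanitize them:
 *
 *	regs = ioremap_prot(res->start, resource_size(res),
 *			    _PAGE_NO_CACHE | _PAGE_RW);
 */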

/*
 * Unmap an IO region and remove it from the imalloc'd list. Access to
 * IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!mem_init_done)
		return;

	addr = (void *)((unsigned long __force)
			PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);