// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file contains the routines setting up the linux page tables.
 *  -- paulus
 *
 * Derived from arch/ppc/mm/init.c:
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 *
 * Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/set_memory.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/early_ioremap.h>

#include <mm/mmu_decl.h>

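/*
 * Statically allocated PTE table backing the fixmap region, so that
 * early_ioremap() works before any memory allocator is available.
 * early_ioremap_init() wires it into the kernel page tables.  The
 * (s32) cast in the loop condition keeps the walk terminating
 * correctly even though FIXADDR_TOP sits near the top of the 32-bit
 * address space, where "addr += PGDIR_SIZE" can wrap past zero.
 */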
static u8 early_fixmap_pagetable[FIXMAP_PTE_SIZE] __page_aligned_data;

notrace void __init early_ioremap_init(void)
{
	unsigned long addr = ALIGN_DOWN(FIXADDR_START, PGDIR_SIZE);
	pte_t *ptep = (pte_t *)early_fixmap_pagetable;
	pmd_t *pmdp = pmd_off_k(addr);

	for (; (s32)(FIXADDR_TOP - addr) > 0;
	     addr += PGDIR_SIZE, ptep += PTRS_PER_PTE, pmdp++)
		pmd_populate_kernel(&init_mm, pmdp, ptep);

	early_ioremap_setup();
}

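/*
 * Boot-time page-table allocator: memblock is the only allocator
 * available this early, and a failed allocation cannot be recovered
 * from, so panic rather than return NULL.
 */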
static void __init *early_alloc_pgtable(unsigned long size)
{
	void *ptr = memblock_alloc(size, size);

	if (!ptr)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, size, size);

	return ptr;
}

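/*
 * Early counterpart of pte_alloc_kernel(): back an empty kernel PMD
 * entry with a PTE fragment allocated from memblock, then return the
 * PTE pointer for @va.
 */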
pte_t __init *early_pte_alloc_kernel(pmd_t *pmdp, unsigned long va)
{
	if (pmd_none(*pmdp)) {
		pte_t *ptep = early_alloc_pgtable(PTE_FRAG_SIZE);

		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}
	return pte_offset_kernel(pmdp, va);
}

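/*
 * Install a single page mapping of physical address @pa at virtual
 * address @va with protection @prot in the kernel page tables,
 * allocating the PTE table from slab once available and from memblock
 * before that.  Returns 0 on success, -ENOMEM if no PTE table could
 * be allocated.
 */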
int __ref map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot)
{
	pmd_t *pd;
	pte_t *pg;
	int err = -ENOMEM;

	/* Use upper 10 bits of VA to index the first level map */
	pd = pmd_off_k(va);
	/* Use middle 10 bits of VA to index the second-level map */
	if (likely(slab_is_available()))
		pg = pte_alloc_kernel(pd, va);
	else
		pg = early_pte_alloc_kernel(pd, va);
	if (pg) {
		err = 0;
		/* The PTE should never be already set nor present in the
		 * hash table
		 */
		BUG_ON((pte_present(*pg) | pte_hashpte(*pg)) && pgprot_val(prot));
		set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT, prot));
	}
	smp_wmb();
	return err;
}

/*
 * Map in a chunk of physical memory, starting at 'offset' bytes from
 * the bottom of RAM and ending at 'top'.  Kernel text is mapped
 * executable (PAGE_KERNEL_TEXT); everything else gets PAGE_KERNEL.
 */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	bool ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = core_kernel_text(v);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

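/*
 * Create the kernel linear mapping for all of lowmem.  Each memblock
 * range is first offered to mmu_mapin_ram(), which may cover part of
 * it with block mappings (e.g. BATs on book3s/32); whatever remains
 * is mapped page by page.
 */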
void __init mapin_ram(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end) {
		phys_addr_t top = min(end, total_lowmem);

		if (base >= top)
			continue;
		base = mmu_mapin_ram(base, top);
		__mapin_ram_chunk(base, top);
	}
}

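/*
 * Called once initmem has been freed: strip execute permission (and
 * restore write permission) from the init text.  Block mappings are
 * handled by mmu_mark_initmem_nx(); page-mapped init text is fixed up
 * PTE by PTE with set_memory_nx()/set_memory_rw().
 */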
void mark_initmem_nx(void)
{
	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
				 PFN_DOWN((unsigned long)_sinittext);

	mmu_mark_initmem_nx();

	if (!v_block_mapped((unsigned long)_sinittext)) {
		set_memory_nx((unsigned long)_sinittext, numpages);
		set_memory_rw((unsigned long)_sinittext, numpages);
	}
}

#ifdef CONFIG_STRICT_KERNEL_RWX
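/*
 * Write-protect kernel text and rodata once boot is complete.  If the
 * kernel is covered by a block mapping, the MMU-specific code adjusts
 * it; otherwise the pages from _stext to __end_rodata are made
 * read-only one PTE at a time.
 */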
void mark_rodata_ro(void)
{
	unsigned long numpages;

	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
		pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");

	if (v_block_mapped((unsigned long)_stext + 1)) {
		mmu_mark_rodata_ro();
		ptdump_check_wx();
		return;
	}

	/*
	 * mark text and rodata as read only. __end_rodata is set by
	 * powerpc's linker script and includes tables and data
	 * requiring relocation which are not put in RO_DATA.
	 */
	numpages = PFN_UP((unsigned long)__end_rodata) -
		   PFN_DOWN((unsigned long)_stext);

	set_memory_ro((unsigned long)_stext, numpages);

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}
#endif

#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
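/*
 * DEBUG_PAGEALLOC: map or unmap linear-mapping pages as they are
 * allocated or freed, so that stray accesses to freed memory fault
 * immediately.  Highmem pages are not in the linear mapping, so there
 * is nothing to do for them here.
 */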
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	unsigned long addr = (unsigned long)page_address(page);

	if (PageHighMem(page))
		return;

	if (enable)
		set_memory_p(addr, numpages);
	else
		set_memory_np(addr, numpages);
}
#endif /* CONFIG_DEBUG_PAGEALLOC */