// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <asm/fixmap.h>
#include <asm/page.h>
#include <as-layout.h>
#include <init.h>
#include <kern.h>
#include <kern_util.h>
#include <mem_user.h>
#include <os.h>
#include <linux/sched/task.h>

#ifdef CONFIG_KASAN
int kasan_um_is_ready;
void kasan_init(void)
{
        /*
         * kasan_map_memory will map all of the required address space and
         * the host machine will allocate physical memory as necessary.
         */
        kasan_map_memory((void *)KASAN_SHADOW_START, KASAN_SHADOW_SIZE);
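        /*
         * init_task boots with kasan_depth set to 1 so that reports are
         * suppressed; clear it now that the shadow region is mapped and
         * reporting is safe.
         */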
        init_task.kasan_depth = 0;
        kasan_um_is_ready = true;
}

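/*
 * The .kasan_init section is collected by the UML linker scripts so that
 * kasan_init() runs constructor-style, early enough that the shadow region
 * exists before instrumented kernel code executes.
 */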
static void (*kasan_init_ptr)(void)
__section(".kasan_init") __used
= kasan_init;
#endif

/* allocated in paging_init, zeroed in mem_init, and unchanged thereafter */
unsigned long *empty_zero_page = NULL;
EXPORT_SYMBOL(empty_zero_page);

/*
 * Initialized during boot, and readonly for initializing page tables
 * afterwards
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* Initialized at boot time, and readonly after that */
unsigned long long highmem;
EXPORT_SYMBOL(highmem);
int kmalloc_ok = 0;

/* Used during early boot */
static unsigned long brk_end;

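/*
 * Boot-time memory init: release everything memblock still owns to the page
 * allocator and set kmalloc_ok, the flag UML's allocation wrappers check
 * before handing requests to kmalloc().
 */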
void __init mem_init(void)
{
        /* clear the zero-page */
        memset(empty_zero_page, 0, PAGE_SIZE);

        /*
         * Map in the area just after the brk now that kmalloc is about
         * to be turned on.
         */
        brk_end = (unsigned long) UML_ROUND_UP(sbrk(0));
        map_memory(brk_end, __pa(brk_end), uml_reserved - brk_end, 1, 1, 0);
        memblock_free((void *)brk_end, uml_reserved - brk_end);
        uml_reserved = brk_end;

        /* this will put all low memory onto the freelists */
        memblock_free_all();
        max_low_pfn = totalram_pages();
        max_pfn = max_low_pfn;
        kmalloc_ok = 1;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry.
 */
static void __init one_page_table_init(pmd_t *pmd)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) memblock_alloc_low(PAGE_SIZE,
                                                          PAGE_SIZE);
                if (!pte)
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);

                set_pmd(pmd, __pmd(_KERNPG_TABLE +
                                   (unsigned long) __pa(pte)));
                BUG_ON(pte != pte_offset_kernel(pmd, 0));
        }
}

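/*
 * With three-level page tables, allocate a pmd page and hook it into the
 * given pud entry; with two levels the pmd is folded and there is nothing
 * to do.
 */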
static void __init one_md_table_init(pud_t *pud)
{
#ifdef CONFIG_3_LEVEL_PGTABLES
        pmd_t *pmd_table = (pmd_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
        if (!pmd_table)
                panic("%s: Failed to allocate %lu bytes align=%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        set_pud(pud, __pud(_KERNPG_TABLE + (unsigned long) __pa(pmd_table)));
        BUG_ON(pmd_table != pmd_offset(pud, 0));
#endif
}

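/*
 * Walk the page-table levels covering [start, end) under pgd_base and
 * allocate any missing pmd/pte pages, so that callers such as set_fixmap()
 * only have to fill in individual ptes later.
 */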
static void __init fixrange_init(unsigned long start, unsigned long end,
                                 pgd_t *pgd_base)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        int i, j;
        unsigned long vaddr;

        vaddr = start;
        i = pgd_index(vaddr);
        j = pmd_index(vaddr);
        pgd = pgd_base + i;

        for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
                p4d = p4d_offset(pgd, vaddr);
                pud = pud_offset(p4d, vaddr);
                if (pud_none(*pud))
                        one_md_table_init(pud);
                pmd = pmd_offset(pud, vaddr);
                for (; (j < PTRS_PER_PMD) && (vaddr < end); pmd++, j++) {
                        one_page_table_init(pmd);
                        vaddr += PMD_SIZE;
                }
                j = 0;
        }
}

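/*
 * When the host's vsyscall area is reused, copy its contents into a fresh
 * allocation and map that copy read-only at the user fixmap addresses, so
 * UML processes see a stable snapshot of the host page.
 */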
static void __init fixaddr_user_init(void)
{
#ifdef CONFIG_ARCH_REUSE_HOST_VSYSCALL_AREA
        long size = FIXADDR_USER_END - FIXADDR_USER_START;
        pte_t *pte;
        phys_t p;
        unsigned long v, vaddr = FIXADDR_USER_START;

        if (!size)
                return;

        fixrange_init(FIXADDR_USER_START, FIXADDR_USER_END, swapper_pg_dir);
        v = (unsigned long) memblock_alloc_low(size, PAGE_SIZE);
        if (!v)
                panic("%s: Failed to allocate %lu bytes align=%lx\n",
                      __func__, size, PAGE_SIZE);

        memcpy((void *) v, (void *) FIXADDR_USER_START, size);
        p = __pa(v);
        for ( ; size > 0; size -= PAGE_SIZE, vaddr += PAGE_SIZE,
                          p += PAGE_SIZE) {
                pte = virt_to_kpte(vaddr);
                pte_set_val(*pte, p, PAGE_READONLY);
        }
#endif
}

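/*
 * Set up the boot-time paging state: allocate the shared zero page, declare
 * the single NORMAL zone to the core mm, and build the page-table skeleton
 * for the fixmap ranges.
 */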
void __init paging_init(void)
{
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
        unsigned long vaddr;

        empty_zero_page = (unsigned long *) memblock_alloc_low(PAGE_SIZE,
                                                               PAGE_SIZE);
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        max_zone_pfn[ZONE_NORMAL] = end_iomem >> PAGE_SHIFT;
        free_area_init(max_zone_pfn);

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, FIXADDR_TOP, swapper_pg_dir);

        fixaddr_user_init();
}

/*
 * This can't do anything because nothing in the kernel image can be freed
 * since it's not in kernel physical memory.
 */

void free_initmem(void)
{
}

/* Allocate and free page tables. */

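/*
 * A new pgd starts with the user half cleared and the kernel half copied
 * from swapper_pg_dir, so every mm shares the same kernel mappings.
 */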
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);

        if (pgd) {
                memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
                memcpy(pgd + USER_PTRS_PER_PGD,
                       swapper_pg_dir + USER_PTRS_PER_PGD,
                       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
        }
        return pgd;
}

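/*
 * Thin kmalloc() wrapper for UML's user-side (os-Linux) code, which is built
 * without kernel headers and therefore cannot call kmalloc() directly.
 */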
void *uml_kmalloc(int size, int flags)
{
        return kmalloc(size, flags);
}

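/*
 * Architecture protection table: maps each combination of VM_READ, VM_WRITE,
 * VM_EXEC and VM_SHARED to a pgprot_t. Private writable mappings get
 * PAGE_COPY so they start read-only and are copied on the first write;
 * DECLARE_VM_GET_PAGE_PROT below generates vm_get_page_prot() from this
 * table.
 */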
static const pgprot_t protection_map[16] = {
        [VM_NONE]                                       = PAGE_NONE,
        [VM_READ]                                       = PAGE_READONLY,
        [VM_WRITE]                                      = PAGE_COPY,
        [VM_WRITE | VM_READ]                            = PAGE_COPY,
        [VM_EXEC]                                       = PAGE_READONLY,
        [VM_EXEC | VM_READ]                             = PAGE_READONLY,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY,
        [VM_SHARED]                                     = PAGE_NONE,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY,
        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC]                           = PAGE_READONLY,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_READONLY,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_SHARED,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_SHARED
};
DECLARE_VM_GET_PAGE_PROT