/*
 * arch/s390/mm/vmem.c
 *
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

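/*
 * Allocate pages for page tables: use the buddy allocator once the slab
 * allocator is available, otherwise fall back to the bootmem allocator.
 */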
static void __ref *vmem_alloc_pages(unsigned int order)
{
	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
}

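/* Allocate and initialize an empty pud (region third) table; 64-bit only. */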
static inline pud_t *vmem_pud_alloc(void)
{
	pud_t *pud = NULL;

#ifdef CONFIG_64BIT
	pud = vmem_alloc_pages(2);
	if (!pud)
		return NULL;
	clear_table((unsigned long *) pud, _REGION3_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pud;
}

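/* Allocate and initialize an empty pmd (segment) table; 64-bit only. */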
static inline pmd_t *vmem_pmd_alloc(void)
{
	pmd_t *pmd = NULL;

#ifdef CONFIG_64BIT
	pmd = vmem_alloc_pages(2);
	if (!pmd)
		return NULL;
	clear_table((unsigned long *) pmd, _SEGMENT_ENTRY_EMPTY, PAGE_SIZE * 4);
#endif
	return pmd;
}

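/* Allocate a page table and mark all of its entries as empty. */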
static pte_t __ref *vmem_pte_alloc(unsigned long address)
{
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm, address);
	else
		pte = alloc_bootmem(PTRS_PER_PTE * sizeof(pte_t));
	if (!pte)
		return NULL;
	clear_table((unsigned long *) pte, _PAGE_TYPE_EMPTY,
		    PTRS_PER_PTE * sizeof(pte_t));
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
		pm_dir = pmd_offset(pu_dir, address);

#ifdef __s390x__
		if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
		    (address + HPAGE_SIZE <= start + size) &&
		    (address >= HPAGE_SIZE)) {
			pte_val(pte) |= _SEGMENT_ENTRY_LARGE;
			pmd_val(*pm_dir) = pte_val(pte);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}
#endif
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	ret = 0;
out:
	flush_tlb_kernel_range(start, start + size);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long address;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;

	pte_val(pte) = _PAGE_TYPE_EMPTY;
	for (address = start; address < start + size; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir))
			continue;
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir))
			continue;

		if (pmd_huge(*pm_dir)) {
			pmd_clear(pm_dir);
			address += HPAGE_SIZE - PAGE_SIZE;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		*pt_dir = pte;
	}
	flush_tlb_kernel_range(start, start + size);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
{
	unsigned long address, start_addr, end_addr;
	pgd_t *pg_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	pte_t pte;
	int ret = -ENOMEM;

	start_addr = (unsigned long) start;
	end_addr = (unsigned long) (start + nr);

	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			pu_dir = vmem_pud_alloc();
			if (!pu_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, pu_dir);
		}

		pu_dir = pud_offset(pg_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_pmd_alloc();
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc(address);
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			unsigned long new_page;

			new_page = __pa(vmem_alloc_pages(0));
			if (!new_page)
				goto out;
			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
			*pt_dir = pte;
		}
	}
	memset(start, 0, nr * sizeof(struct page));
	ret = 0;
out:
	flush_tlb_kernel_range(start_addr, end_addr);
	return ret;
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

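/* Unlink a segment and invalidate its 1:1 mapping; caller holds vmem_mutex. */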
static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

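/*
 * Remove a previously added mapping. Fails with -ENOENT if no segment
 * with the given start and size has been registered.
 */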
int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

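/*
 * Register a new memory segment and add it to the 1:1 mapping.
 * On any failure the segment is unregistered and freed again.
 */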
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size, 0);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	unsigned long ro_start, ro_end;
	unsigned long start, end;
	int i;

	ro_start = ((unsigned long)&_stext) & PAGE_MASK;
	ro_end = PFN_ALIGN((unsigned long)&_eshared);
	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		start = memory_chunk[i].addr;
		end = memory_chunk[i].addr + memory_chunk[i].size;
		if (start >= ro_end || end <= ro_start)
			vmem_add_mem(start, end - start, 0);
		else if (start >= ro_start && end <= ro_end)
			vmem_add_mem(start, end - start, 1);
		else if (start >= ro_start) {
			vmem_add_mem(start, ro_end - start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		} else if (end < ro_end) {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, end - ro_start, 1);
		} else {
			vmem_add_mem(start, ro_start - start, 0);
			vmem_add_mem(ro_start, ro_end - ro_start, 1);
			vmem_add_mem(ro_end, end - ro_end, 0);
		}
	}
}

/*
 * Convert memory chunk array to a memory segment list so there is a single
 * list that contains both r/w memory and shared memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memory_segment *seg;
	int i;

	mutex_lock(&vmem_mutex);
	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (!memory_chunk[i].size)
			continue;
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = memory_chunk[i].addr;
		seg->size = memory_chunk[i].size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/memblock.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);

struct memory_segment {
	struct list_head list;
	unsigned long start;
	unsigned long size;
};

static LIST_HEAD(mem_segs);

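/*
 * Allocate pages for page tables: use the buddy allocator once the slab
 * allocator is available, otherwise fall back to memblock.
 */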
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return (void *) memblock_alloc(size, size);
}

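/*
 * Allocate a region or segment table (CRST) and initialize all of its
 * entries with the given empty value.
 */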
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

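/* Allocate a page table and mark all of its entries as invalid. */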
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_mem(unsigned long start, unsigned long size)
{
	unsigned long pgt_prot, sgt_prot, r3_prot;
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	r3_prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
		r3_prot &= ~_REGION_ENTRY_NOEXEC;
	}
	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}
		pu_dir = pud_offset(p4_dir, address);
		if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
		    !(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pud_val(*pu_dir) = address | r3_prot;
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
		    !(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
		    !debug_pagealloc_enabled()) {
			pmd_val(*pm_dir) = address | sgt_prot;
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		if (pmd_none(*pm_dir)) {
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_val(*pt_dir) = address | pgt_prot;
		address += PAGE_SIZE;
		pages4k++;
	}
	ret = 0;
out:
	update_page_count(PG_DIRECT_MAP_4K, pages4k);
	update_page_count(PG_DIRECT_MAP_1M, pages1m);
	update_page_count(PG_DIRECT_MAP_2G, pages2g);
	return ret;
}

/*
 * Remove a physical memory range from the 1:1 mapping.
 * Currently only invalidates page table entries.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	unsigned long pages4k, pages1m, pages2g;
	unsigned long end = start + size;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pages4k = pages1m = pages2g = 0;
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			address += PGDIR_SIZE;
			continue;
		}
		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			address += P4D_SIZE;
			continue;
		}
		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			address += PUD_SIZE;
			continue;
		}
		if (pud_large(*pu_dir)) {
			pud_clear(pu_dir);
			address += PUD_SIZE;
			pages2g++;
			continue;
		}
		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			address += PMD_SIZE;
			continue;
		}
		if (pmd_large(*pm_dir)) {
			pmd_clear(pm_dir);
			address += PMD_SIZE;
			pages1m++;
			continue;
		}
		pt_dir = pte_offset_kernel(pm_dir, address);
		pte_clear(&init_mm, address, pt_dir);
		address += PAGE_SIZE;
		pages4k++;
	}
	flush_tlb_kernel_range(start, end);
	update_page_count(PG_DIRECT_MAP_4K, -pages4k);
	update_page_count(PG_DIRECT_MAP_1M, -pages1m);
	update_page_count(PG_DIRECT_MAP_2G, -pages2g);
}

/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	unsigned long pgt_prot, sgt_prot;
	unsigned long address = start;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;
	int ret = -ENOMEM;

	pgt_prot = pgprot_val(PAGE_KERNEL);
	sgt_prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX) {
		pgt_prot &= ~_PAGE_NOEXEC;
		sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
	}
	for (address = start; address < end;) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			p4_dir = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4_dir)
				goto out;
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			pu_dir = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pu_dir)
				goto out;
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			pm_dir = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pm_dir)
				goto out;
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			/* Use 1MB frames for vmemmap if available. We always
			 * use large frames even if they are only partially
			 * used.
			 * Otherwise we would have also page tables since
			 * vmemmap_populate gets called for each section
			 * separately. */
			if (MACHINE_HAS_EDAT1) {
				void *new_page;

				new_page = vmemmap_alloc_block(PMD_SIZE, node);
				if (!new_page)
					goto out;
				pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			pt_dir = vmem_pte_alloc();
			if (!pt_dir)
				goto out;
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *new_page;

			new_page = vmemmap_alloc_block(PAGE_SIZE, node);
			if (!new_page)
				goto out;
			pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
		}
		address += PAGE_SIZE;
	}
	ret = 0;
out:
	return ret;
}

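/* No-op: the vmemmap backing memory is currently never freed. */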
void vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
}

/*
 * Add memory segment to the segment list if it doesn't overlap with
 * an already present segment.
 */
static int insert_memory_segment(struct memory_segment *seg)
{
	struct memory_segment *tmp;

	if (seg->start + seg->size > VMEM_MAX_PHYS ||
	    seg->start + seg->size < seg->start)
		return -ERANGE;

	list_for_each_entry(tmp, &mem_segs, list) {
		if (seg->start >= tmp->start + tmp->size)
			continue;
		if (seg->start + seg->size <= tmp->start)
			continue;
		return -ENOSPC;
	}
	list_add(&seg->list, &mem_segs);
	return 0;
}

/*
 * Remove memory segment from the segment list.
 */
static void remove_memory_segment(struct memory_segment *seg)
{
	list_del(&seg->list);
}

static void __remove_shared_memory(struct memory_segment *seg)
{
	remove_memory_segment(seg);
	vmem_remove_range(seg->start, seg->size);
}

int vmem_remove_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);

	ret = -ENOENT;
	list_for_each_entry(seg, &mem_segs, list) {
		if (seg->start == start && seg->size == size)
			break;
	}

	if (seg->start != start || seg->size != size)
		goto out;

	ret = 0;
	__remove_shared_memory(seg);
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct memory_segment *seg;
	int ret;

	mutex_lock(&vmem_mutex);
	ret = -ENOMEM;
	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
	if (!seg)
		goto out;
	seg->start = start;
	seg->size = size;

	ret = insert_memory_segment(seg);
	if (ret)
		goto out_free;

	ret = vmem_add_mem(start, size);
	if (ret)
		goto out_remove;
	goto out;

out_remove:
	__remove_shared_memory(seg);
out_free:
	kfree(seg);
out:
	mutex_unlock(&vmem_mutex);
	return ret;
}

/*
 * map whole physical memory to virtual memory (identity mapping)
 * we reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		vmem_add_mem(reg->base, reg->size);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}

/*
 * Convert memblock.memory to a memory segment list so there is a single
 * list that contains all memory segments.
 */
static int __init vmem_convert_memory_chunk(void)
{
	struct memblock_region *reg;
	struct memory_segment *seg;

	mutex_lock(&vmem_mutex);
	for_each_memblock(memory, reg) {
		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
		if (!seg)
			panic("Out of memory...\n");
		seg->start = reg->base;
		seg->size = reg->size;
		insert_memory_segment(seg);
	}
	mutex_unlock(&vmem_mutex);
	return 0;
}

core_initcall(vmem_convert_memory_chunk);