// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>
#include <linux/execmem.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

/* Unlike ATAG_INITRD, ATAG_INITRD2 carries a physical address directly. */
static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

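/*
 * Worked example (editorial, not from the original source): with DRAM
 * spanning 0x60000000..0xa0000000 and a lowmem limit of 0x90000000,
 * find_limits() yields min = 0x60000, max_low = 0x90000 and
 * max_high = 0xa0000 (all PFNs, i.e. physical addresses >> PAGE_SHIFT
 * with 4 KiB pages).
 */
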
#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

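/*
 * Illustrative sketch, not part of the original file: a board whose DMA
 * masters can only reach the first 64 MiB of RAM might request a DMA
 * zone from its machine descriptor like this (machine name and size are
 * hypothetical):
 */
#if 0
MACHINE_START(EXAMPLE, "Example board")	/* hypothetical machine */
	.dma_zone_size	= SZ_64M,	/* GFP_DMA served from first 64 MiB */
	/* ... other machine_desc fields ... */
MACHINE_END
#endif
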
static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
				   unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

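/*
 * Worked example (editorial): with min = 0x60000, max_low = 0x90000 and
 * max_high = 0xa0000 as above, ZONE_NORMAL covers PFNs up to 0x90000 and
 * ZONE_HIGHMEM the remainder up to 0xa0000; if a DMA zone is configured,
 * it ends at the lower of arm_dma_pfn_limit and max_low.
 */
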
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);
	unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	/*
	 * If the address is less than pageblock_size bytes away from a
	 * present memory chunk, there will still be a memory map entry
	 * for it, because we round the freed memory map to pageblock
	 * boundaries.
	 */
	if (memblock_overlaps_region(&memblock.memory,
				     ALIGN_DOWN(addr, pageblock_size),
				     pageblock_size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

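/*
 * Illustrative note: with 4 KiB pages and a typical pageblock of 1024
 * pages, pageblock_size is 4 MiB, so a PFN in the same 4 MiB pageblock
 * as present memory still reports valid — matching the granularity at
 * which the unused memory map is freed.
 */
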
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_phys_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

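/*
 * Illustrative sketch, assuming a hypothetical platform: stealing is only
 * permitted until arm_memblock_init() clears arm_memblock_steal_permitted,
 * so a caller would typically steal from its machine_desc ->reserve()
 * hook, which runs before that point.
 */
#if 0
static void __init example_reserve(void)	/* hypothetical ->reserve() */
{
	/* Carve 8 MiB out of memblock for a firmware-owned framebuffer. */
	phys_addr_t fb_base = arm_memblock_steal(SZ_8M, SZ_1M);
	/* ... hand fb_base to the platform's display setup ... */
}
#endif
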
#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

	/* CTR.IminLine (bits [3:0]) is log2 of the line size in words. */
	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	reserve_initrd_mem();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so it must
	 * be done after the fixed reservations.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb). Assumes count is a multiple of four.
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem page free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free. This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
	swiotlb_init(max_pfn > arm_dma_pfn_limit, SWIOTLB_VERBOSE);
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

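/*
 * Editorial sketch of the resulting lowmem layout, addresses increasing
 * top to bottom (X = executable, NX = non-executable):
 *
 *   PAGE_OFFSET .. _stext                           NX  (page tables etc.)
 *   _stext .. __start_rodata_section_aligned        X   (kernel text)
 *   __start_rodata_section_aligned .. __init_begin  NX  (rodata)
 *   __init_begin .. _sdata                          NX  (init text/data)
 */
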
static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	/* !LPAE: each pmd covers a pair of 1 MiB sections; pick the right one. */
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
			       perms[i].name, perms[i].start, perms[i].end,
			       SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, executed by a single CPU while all other
 * CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	/* Also cover the mm this CPU is currently borrowing, and init_mm. */
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

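/* Invoked from the generic kernel init path (mark_readonly()) late in boot. */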
void mark_rodata_ro(void)
{
	stop_machine(__mark_rodata_ro, NULL, NULL);
	arm_debug_checkwx();
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

#ifdef CONFIG_EXECMEM

#ifdef CONFIG_XIP_KERNEL
/*
 * The XIP kernel text is mapped in the module area for modules and
 * some other stuff to work without any indirect relocations.
 * MODULES_VADDR is redefined here and not in asm/memory.h to avoid
 * recompiling the whole kernel when CONFIG_XIP_KERNEL is turned on/off.
 * The expression below rounds _exiprom up to the next PMD boundary.
 */
#undef MODULES_VADDR
#define MODULES_VADDR (((unsigned long)_exiprom + ~PMD_MASK) & PMD_MASK)
#endif

#ifdef CONFIG_MMU
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long fallback_start = 0, fallback_end = 0;

	/*
	 * With module PLTs, executable allocations may overflow from the
	 * module area into vmalloc space, since PLTs make the longer
	 * branches reachable.
	 */
	if (IS_ENABLED(CONFIG_ARM_MODULE_PLTS)) {
		fallback_start = VMALLOC_START;
		fallback_end = VMALLOC_END;
	}

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.start	= MODULES_VADDR,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL_EXEC,
				.alignment = 1,
				.fallback_start	= fallback_start,
				.fallback_end	= fallback_end,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_MMU */

#endif /* CONFIG_EXECMEM */