/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>

#include <asm/mach-types.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

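/*
 * Parse the "initrd=" early parameter: a physical start address and a
 * size, separated by a comma, both in any form memparse() accepts (so
 * suffixes such as K/M/G work), e.g. "initrd=0x00800000,8M".
 */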
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	printk(KERN_WARNING "ATAG_INITRD is deprecated; "
	       "please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */

/*
 * This keeps memory configuration data used by a couple of memory
 * initialization functions, as well as show_mem() for the skipping
 * of holes in the memory map.  It is populated by arm_add_memory().
 */
struct meminfo meminfo;

void show_mem(unsigned int filter)
{
	int free = 0, total = 0, reserved = 0;
	int shared = 0, cached = 0, slab = 0, i;
	struct meminfo * mi = &meminfo;

	printk("Mem-info:\n");
	show_free_areas(filter);

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (PageSlab(page))
				slab++;
			else if (!page_count(page))
				free++;
			else
				shared += page_count(page) - 1;
			page++;
		} while (page < end);
	}

	printk("%d pages of RAM\n", total);
	printk("%d free pages\n", free);
	printk("%d reserved pages\n", reserved);
	printk("%d slab pages\n", slab);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

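/*
 * Scan the registered memory banks for the lowest and highest page frame
 * numbers present, and for the highest PFN that still belongs to lowmem.
 */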
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	struct meminfo *mi = &meminfo;
	int i;

	*min = -1UL;
	*max_low = *max_high = 0;

	for_each_bank (i, mi) {
		struct membank *bank = &mi->bank[i];
		unsigned long start, end;

		start = bank_pfn_start(bank);
		end = bank_pfn_end(bank);

		if (*min > start)
			*min = start;
		if (*max_high < end)
			*max_high = end;
		if (bank->highmem)
			continue;
		if (*max_low < end)
			*max_low = end;
	}
}

static void __init arm_bootmem_init(unsigned long start_pfn,
	unsigned long end_pfn)
{
	struct memblock_region *reg;
	unsigned int boot_pages;
	phys_addr_t bitmap;
	pg_data_t *pgdat;

	/*
	 * Allocate the bootmem bitmap page.  This must be in a region
	 * of memory which has already been mapped.
	 */
	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
				     __pfn_to_phys(end_pfn));

	/*
	 * Initialise the bootmem allocator, handing the
	 * memory banks over to bootmem.
	 */
	node_set_online(0);
	pgdat = NODE_DATA(0);
	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);

	/* Free the lowmem regions from memblock into bootmem. */
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
	}

	/* Reserve the lowmem memblock reserved regions in bootmem. */
	for_each_memblock(reserved, reg) {
		unsigned long start = memblock_region_reserved_base_pfn(reg);
		unsigned long end = memblock_region_reserved_end_pfn(reg);

		if (end >= end_pfn)
			end = end_pfn;
		if (start >= end)
			break;

		reserve_bootmem(__pfn_to_phys(start),
				(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
	}
}

#ifdef CONFIG_ZONE_DMA

unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
u32 arm_dma_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

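/*
 * Compute the size of, and the holes within, each zone, then hand the
 * layout to free_area_init_node() to build the zone structures.
 */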
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size) {
		arm_adjust_dma_zone(zone_size, zhole_size,
			arm_dma_zone_size >> PAGE_SHIFT);
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

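/*
 * With HAVE_ARCH_PFN_VALID, a PFN is valid only if it lies within RAM
 * known to memblock, which copes with holes in the physical memory map.
 */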
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm_memory_present(void)
{
}
#else
static void arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

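/* Comparator for sort(): order the memory banks by ascending start PFN. */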
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
	int i;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	memblock_init();
	for (i = 0; i < mi->nr_banks; i++)
		memblock_add(mi->bank[i].start, mi->bank[i].size);

	/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
	memblock_reserve(__pa(_sdata), _end - _sdata);
#else
	memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size &&
	    !memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size &&
	    memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
		pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       phys_initrd_start, phys_initrd_size);
		phys_initrd_start = phys_initrd_size = 0;
	}
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	arm_mm_memblock_reserve();
	arm_dt_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	memblock_analyze();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	arm_bootmem_init(min, max_low);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	arm_bootmem_free(min, max_low, max_high);

	high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1;

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 *
	 * Note: max_low_pfn and max_pfn reflect the number of _pages_ in
	 * the system, not the maximum PFN.
	 */
	max_low_pfn = max_low - PHYS_PFN_OFFSET;
	max_pfn = max_high - PHYS_PFN_OFFSET;
}

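/*
 * Hand the pages in [pfn, end) back to the page allocator and return the
 * number of pages freed; "s" names the region in the boot-time log.
 */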
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(struct meminfo *mi)
{
	unsigned long bank_start, prev_bank_end = 0;
	unsigned int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_bank(i, mi) {
		struct membank *bank = &mi->bank[i];

		bank_start = bank_pfn_start(bank);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_bank_end && prev_bank_end < bank_start)
			free_memmap(prev_bank_end, bank_start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
		free_memmap(prev_bank_end,
			    ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}

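/*
 * Free every highmem page not covered by a memblock reservation,
 * accumulating the count in totalhigh_pages.
 */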
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				totalhigh_pages += free_area(start, res_start,
							     NULL);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			totalhigh_pages += free_area(start, end, NULL);
	}
	totalram_pages += totalhigh_pages;
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;
	int i;
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap(&meminfo);

	totalram_pages += free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	totalram_pages += free_area(PHYS_PFN_OFFSET,
				    __phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif

	free_highpages();

	reserved_pages = free_pages = 0;

	for_each_bank(i, &meminfo) {
		struct membank *bank = &meminfo.bank[i];
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = bank_pfn_start(bank);
		pfn2 = bank_pfn_end(bank);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the
	 * real number of pages we have in this system
	 */
	printk(KERN_INFO "Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		free_pages << (PAGE_SHIFT-10),
		reserved_pages << (PAGE_SHIFT-10),
		totalhigh_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	printk(KERN_NOTICE "Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_MMU
			"    DMA     : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"      .text : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4d kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4d kB)\n",

			MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
				(PAGE_SIZE)),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_TOP),
#ifdef CONFIG_MMU
			MLM(CONSISTENT_BASE, CONSISTENT_END),
#endif
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
			MLM(MODULES_VADDR, MODULES_END),

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE);
	BUG_ON(VMALLOC_END > CONSISTENT_BASE);

	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get
		 * anywhere without overcommit, so turn
		 * it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
#ifdef CONFIG_HAVE_TCM
	extern char __tcm_start, __tcm_end;

	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
	totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
				    __phys_to_pfn(__pa(&__tcm_end)),
				    "TCM link");
#endif

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
					    __phys_to_pfn(__pa(__init_end)),
					    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

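/*
 * Clear bits in cr_alignment, the cached copy of the CP15 control
 * register value, and return the updated value for the caller to use.
 */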
#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

static phys_addr_t phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

static int __init early_initrd(char *p)
{
	phys_addr_t start;
	unsigned long size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);

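/*
 * The limits now come straight from memblock rather than from a meminfo
 * bank walk; the current memblock limit marks the top of lowmem.
 */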
static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
	unsigned long dma_size)
{
	if (size[0] <= dma_size)
		return;

	size[ZONE_NORMAL] = size[0] - dma_size;
	size[ZONE_DMA] = dma_size;
	hole[ZONE_NORMAL] = hole[0];
	hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	struct memblock_region *reg;

	/*
	 * initialise the zones.
	 */
	memset(zone_size, 0, sizeof(zone_size));

	/*
	 * The memory size has already been determined.  If we need
	 * to do anything fancy with the allocation of this memory
	 * to the zones, now is the time to do it.
	 */
	zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
	zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

	/*
	 * Calculate the size of the holes.
	 *  holes = node_size - sum(bank_sizes)
	 */
	memcpy(zhole_size, zone_size, sizeof(zhole_size));
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start < max_low) {
			unsigned long low_end = min(end, max_low);
			zhole_size[0] -= low_end - start;
		}
#ifdef CONFIG_HIGHMEM
		if (end > max_low) {
			unsigned long high_start = max(start, max_low);
			zhole_size[ZONE_HIGHMEM] -= end - high_start;
		}
#endif
	}

#ifdef CONFIG_ZONE_DMA
	/*
	 * Adjust the sizes according to any special requirements for
	 * this machine type.
	 */
	if (arm_dma_zone_size)
		arm_adjust_dma_zone(zone_size, zhole_size,
				    arm_dma_zone_size >> PAGE_SHIFT);
#endif

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_map_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

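/*
 * Carve a region out of memblock entirely, for early platform carveouts
 * that must stay out of the kernel's view of RAM.  Only permitted until
 * arm_memblock_init() clears arm_memblock_steal_permitted.
 */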
static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

	phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	/* FDT scan will populate initrd_start */
	if (initrd_start && !phys_initrd_size) {
		phys_initrd_start = __virt_to_phys(initrd_start);
		phys_initrd_size = initrd_end - initrd_start;
	}

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem().
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

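/*
 * Register all boot-time reservations with memblock: the kernel image,
 * the initrd, early page tables, platform carveouts, the FDT and the
 * contiguous-DMA (CMA) area.  Stealing is forbidden from here on.
 */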
void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max_low, max_high;

	memblock_allow_resize();
	max_low = max_high = 0;

	find_limits(&min, &max_low, &max_high);

	early_memtest((phys_addr_t)min << PAGE_SHIFT,
		      (phys_addr_t)max_low << PAGE_SHIFT);

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(),
	 * so must be done after the fixed reservations
	 */
	arm_memory_present();

	/*
	 * sparse_init() needs the bootmem allocator up and running.
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init_node needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min, max_low, max_high);

	/*
	 * This doesn't seem to be used by the Linux memory manager any
	 * more, but is used by ll_rw_block.  If we can get rid of it, we
	 * also get rid of some of the stuff above as well.
	 */
	min_low_pfn = min;
	max_low_pfn = max_low;
	max_pfn = max_high;
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_memblock(memory, reg) {
		start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#else
		/*
		 * Align down here since the VM subsystem insists that the
		 * memmap entries are valid from the bank start aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
	for (; pfn < end; pfn++)
		free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	struct memblock_region *mem, *res;

	/* set highmem page free */
	for_each_memblock(memory, mem) {
		unsigned long start = memblock_region_memory_base_pfn(mem);
		unsigned long end = memblock_region_memory_end_pfn(mem);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		if (memblock_is_nomap(mem))
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		/* Find and exclude any reserved regions */
		for_each_memblock(reserved, res) {
			unsigned long res_start, res_end;

			res_start = memblock_region_reserved_base_pfn(res);
			res_end = memblock_region_reserved_end_pfn(res);

			if (res_end < start)
				continue;
			if (res_start < start)
				res_start = start;
			if (res_start > end)
				res_start = end;
			if (res_end > end)
				res_end = end;
			if (res_start != start)
				free_area_high(start, res_start);
			start = res_end;
			if (start == end)
				break;
		}

		/* And now free anything which remains */
		if (start < end)
			free_area_high(start, end);
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_HAVE_TCM
	/* These pointers are filled in on TCM detection */
	extern u32 dtcm_end;
	extern u32 itcm_end;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	free_all_bootmem();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
			"    vector  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HAVE_TCM
			"    DTCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    ITCM    : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
			"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
			"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
			"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#ifdef CONFIG_HIGHMEM
			"    pkmap   : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
			"    modules : 0x%08lx - 0x%08lx   (%4ld MB)\n"
#endif
			"      .text : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .init : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"      .data : 0x%p" " - 0x%p" "   (%4td kB)\n"
			"       .bss : 0x%p" " - 0x%p" "   (%4td kB)\n",

			MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
#ifdef CONFIG_HAVE_TCM
			MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
			MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
			MLK(FIXADDR_START, FIXADDR_END),
			MLM(VMALLOC_START, VMALLOC_END),
			MLM(PAGE_OFFSET, (unsigned long)high_memory),
#ifdef CONFIG_HIGHMEM
			MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
				(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
			MLM(MODULES_VADDR, MODULES_END),
#endif

			MLK_ROUNDUP(_text, _etext),
			MLK_ROUNDUP(__init_begin, __init_end),
			MLK_ROUNDUP(_sdata, _edata),
			MLK_ROUNDUP(__bss_start, __bss_stop));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
	BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

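/*
 * With CONFIG_STRICT_KERNEL_RWX, the section mappings covering the kernel
 * image are rewritten after boot: each section_perm entry names an address
 * range plus the pmd bits to set (prot) or to restore (clear) over it.
 */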
#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start	= (unsigned long)__start_rodata_section_aligned,
		.end	= (unsigned long)__init_begin,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start	= (unsigned long)_stext,
		.end	= (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask	= ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot	= L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask	= ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot	= PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear	= PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm).  During startup, this is the init_mm.  It is only
 * safe to call this with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

void set_section_perms(struct section_perm *perms, int n, bool set,
		       struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				       set ? perms[i].prot : perms[i].clear, mm);
	}
}

/**
 * update_sections_early is intended to be called only through the
 * stop_machine framework and executed by only one CPU while all other CPUs
 * spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

void mark_rodata_ro(void)
{
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
			  current->active_mm);
}

void set_kernel_text_ro(void)
{
	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
			  current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		if (start == initrd_start)
			start = round_down(start, PAGE_SIZE);
		if (end == initrd_end)
			end = round_up(end, PAGE_SIZE);

		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif