// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
        cr_alignment = cr_alignment & ~mask;
        return cr_alignment;
}
#endif
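
/*
 * Illustrative usage (hedged, not part of this file): callers drop
 * control-register bits they manage themselves, e.g. alignment-fault
 * handling could disable hardware alignment checking with
 * set_cr(__clear_cr(CR_A)).
 */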

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
        pr_warn("ATAG_INITRD is deprecated; please update your bootloader.\n");
        phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
        phys_initrd_start = tag->u.initrd.start;
        phys_initrd_size = tag->u.initrd.size;
        return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
                               unsigned long *max_high)
{
        *max_low = PFN_DOWN(memblock_get_current_limit());
        *min = PFN_UP(memblock_start_of_DRAM());
        *max_high = PFN_DOWN(memblock_end_of_DRAM());
}
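
/*
 * Worked example (hypothetical numbers): with DRAM at 0x80000000, 1GiB
 * of RAM and a lowmem limit of 0xb0000000, 4KiB pages give
 * *min = 0x80000, *max_low = 0xb0000 and *max_high = 0xc0000.
 */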

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA. The default here places no restriction on DMA
 * allocations. This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;

static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
        unsigned long dma_size)
{
        if (size[0] <= dma_size)
                return;

        size[ZONE_NORMAL] = size[0] - dma_size;
        size[ZONE_DMA] = dma_size;
        hole[ZONE_NORMAL] = hole[0];
        hole[ZONE_DMA] = 0;
}
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
        if (mdesc->dma_zone_size) {
                arm_dma_zone_size = mdesc->dma_zone_size;
                arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
        } else
                arm_dma_limit = 0xffffffff;
        arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
#endif
}
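
/*
 * Illustrative example (hypothetical board): a machine whose DMA
 * controller can only address the first 64MiB of RAM would set
 * .dma_zone_size = SZ_64M in its machine_desc, yielding
 * arm_dma_limit = PHYS_OFFSET + SZ_64M - 1.
 */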

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
        unsigned long max_high)
{
        unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
        struct memblock_region *reg;

        /*
         * initialise the zones.
         */
        memset(zone_size, 0, sizeof(zone_size));

        /*
         * The memory size has already been determined.  If we need
         * to do anything fancy with the allocation of this memory
         * to the zones, now is the time to do it.
         */
        zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
        zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif

        /*
         * Calculate the size of the holes.
         *  holes = node_size - sum(bank_sizes)
         */
        memcpy(zhole_size, zone_size, sizeof(zhole_size));
        for_each_memblock(memory, reg) {
                unsigned long start = memblock_region_memory_base_pfn(reg);
                unsigned long end = memblock_region_memory_end_pfn(reg);

                if (start < max_low) {
                        unsigned long low_end = min(end, max_low);
                        zhole_size[0] -= low_end - start;
                }
#ifdef CONFIG_HIGHMEM
                if (end > max_low) {
                        unsigned long high_start = max(start, max_low);
                        zhole_size[ZONE_HIGHMEM] -= end - high_start;
                }
#endif
        }

#ifdef CONFIG_ZONE_DMA
        /*
         * Adjust the sizes according to any special requirements for
         * this machine type.
         */
        if (arm_dma_zone_size)
                arm_adjust_dma_zone(zone_size, zhole_size,
                                    arm_dma_zone_size >> PAGE_SHIFT);
#endif

        free_area_init_node(0, zone_size, min, zhole_size);
}
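
/*
 * Worked example (hypothetical): two banks at pfns 0x80000-0x90000 and
 * 0xa0000-0xb0000 with max_low = 0xb0000 give zone_size[0] = 0x30000;
 * each bank subtracts 0x10000 from zhole_size[0], leaving a hole of
 * 0x10000 pfns for the gap between the banks.
 */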

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
        phys_addr_t addr = __pfn_to_phys(pfn);

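        /*
         * The round-trip check guards against pfns whose physical
         * address overflows phys_addr_t: if converting back yields a
         * different pfn, the pfn lies beyond the addressable range.
         */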
        if (__phys_to_pfn(addr) != pfn)
                return 0;

        return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
        phys_addr_t phys;

        BUG_ON(!arm_memblock_steal_permitted);

        phys = memblock_phys_alloc(size, align);
        if (!phys)
                panic("Failed to steal %pa bytes at %pS\n",
                      &size, (void *)_RET_IP_);

        memblock_free(phys, size);
        memblock_remove(phys, size);

        return phys;
}
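
/*
 * Illustrative (hypothetical platform code): a board's ->reserve()
 * hook might carve out a contiguous framebuffer before stealing is
 * locked down in arm_memblock_init():
 *
 *      fb_base = arm_memblock_steal(SZ_8M, SZ_1M);
 */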

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
        phys_addr_t start;
        unsigned long size;

        initrd_start = initrd_end = 0;

        if (!phys_initrd_size)
                return;

        /*
         * Round the memory region to page boundaries as per free_initrd_mem().
         * This allows us to detect whether the pages overlapping the initrd
         * are in use, but more importantly, reserves the entire set of pages
         * as we don't want these pages allocated for other purposes.
         */
        start = round_down(phys_initrd_start, PAGE_SIZE);
        size = phys_initrd_size + (phys_initrd_start - start);
        size = round_up(size, PAGE_SIZE);

        if (!memblock_is_region_memory(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        if (memblock_is_region_reserved(start, size)) {
                pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
                       (u64)start, size);
                return;
        }

        memblock_reserve(start, size);

        /* Now convert initrd to virtual addresses */
        initrd_start = __phys_to_virt(phys_initrd_start);
        initrd_end = initrd_start + phys_initrd_size;
#endif
}
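
/*
 * Worked example (hypothetical values): phys_initrd_start = 0x80800123
 * and phys_initrd_size = 0x300000 round to start = 0x80800000 and
 * size = 0x301000, so every page touched by the initrd is reserved.
 */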

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
        u32 size, ctr;

        asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

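        /*
         * CTR.IminLine (bits [3:0]) is log2 of the number of words in
         * the smallest I-cache line, so the line size in bytes is
         * 1 << (IminLine + 2).
         */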
        size = 1 << ((ctr & 0xf) + 2);
        if (cpuid != 0 && icache_size != size)
                pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
                        cpuid);
        if (icache_size > size)
                icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
        /* Register the kernel text, kernel data and initrd with memblock. */
        memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

        arm_initrd_init();

        arm_mm_memblock_reserve();

        /* reserve any platform specific memblock areas */
        if (mdesc->reserve)
                mdesc->reserve();

        early_init_fdt_reserve_self();
        early_init_fdt_scan_reserved_mem();

        /* reserve memory for DMA contiguous allocations */
        dma_contiguous_reserve(arm_dma_limit);

        arm_memblock_steal_permitted = false;
        memblock_dump_all();
}
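
/*
 * Note on ordering: mdesc->reserve() runs before
 * arm_memblock_steal_permitted is cleared, so any arm_memblock_steal()
 * calls must come from that hook (or earlier).
 */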

void __init bootmem_init(void)
{
        memblock_allow_resize();

        find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

        early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
                      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

        /*
         * Sparsemem tries to allocate bootmem in memory_present(),
         * so must be done after the fixed reservations
         */
        memblocks_present();

        /*
         * sparse_init() needs the bootmem allocator up and running.
         */
        sparse_init();

        /*
         * Now free the memory - free_area_init_node needs
         * the sparse mem_map arrays initialized by sparse_init()
         * for memmap_init_zone(), otherwise all PFNs are invalid.
         */
        zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
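
/*
 * In short, boot-time memory setup proceeds: find_limits() derives the
 * pfn bounds from memblock, sparse_init() builds the mem_map, and
 * zone_sizes_init() hands the zone layout to free_area_init_node().
 */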

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
        u32 *p = (u32 *)s;
        for (; count != 0; count -= 4)
                *p++ = 0xe7fddef0;
}
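
/*
 * Why 0xe7fddef0 (best-effort reading of the encodings): in ARM state
 * the word lies in the architecturally undefined instruction space;
 * read as little-endian Thumb halfwords it is 0xdef0 (permanently
 * undefined) followed by 0xe7fd (a short backwards branch), matching
 * the comment above.
 */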

static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
        struct page *start_pg, *end_pg;
        phys_addr_t pg, pgend;

        /*
         * Convert start_pfn/end_pfn to a struct page pointer.
         */
        start_pg = pfn_to_page(start_pfn - 1) + 1;
        end_pg = pfn_to_page(end_pfn - 1) + 1;

        /*
         * Convert to physical addresses, and
         * round start upwards and end downwards.
         */
        pg = PAGE_ALIGN(__pa(start_pg));
        pgend = __pa(end_pg) & PAGE_MASK;

        /*
         * If there are free pages between these,
         * free the section of the memmap array.
         */
        if (pg < pgend)
                memblock_free_early(pg, pgend - pg);
}
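
/*
 * The "pfn_to_page(pfn - 1) + 1" form is deliberate: with SPARSEMEM,
 * start_pfn itself can be the first pfn of an absent section, where
 * pfn_to_page() is not valid, while pfn - 1 still lies in a present
 * section.
 */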

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
        unsigned long start, prev_end = 0;
        struct memblock_region *reg;

        /*
         * This relies on each bank being in address order.
         * The banks are sorted previously in bootmem_init().
         */
        for_each_memblock(memory, reg) {
                start = memblock_region_memory_base_pfn(reg);

#ifdef CONFIG_SPARSEMEM
                /*
                 * Take care not to free memmap entries that don't exist
                 * due to SPARSEMEM sections which aren't present.
                 */
                start = min(start,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#else
                /*
                 * Align down here since the VM subsystem insists that the
                 * memmap entries are valid from the bank start aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                start = round_down(start, MAX_ORDER_NR_PAGES);
#endif
                /*
                 * If we had a previous bank, and there is a space
                 * between the current bank and the previous, free it.
                 */
                if (prev_end && prev_end < start)
                        free_memmap(prev_end, start);

                /*
                 * Align up here since the VM subsystem insists that the
                 * memmap entries are valid from the bank end aligned to
                 * MAX_ORDER_NR_PAGES.
                 */
                prev_end = ALIGN(memblock_region_memory_end_pfn(reg),
                                 MAX_ORDER_NR_PAGES);
        }

#ifdef CONFIG_SPARSEMEM
        if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
                free_memmap(prev_end,
                            ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
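
/*
 * Worked example (hypothetical, !SPARSEMEM, MAX_ORDER_NR_PAGES = 0x400):
 * banks at pfns 0x80000-0x90000 and 0xa0000-0xb0000 leave prev_end =
 * 0x90000 after the first bank, so the memmap for pfns 0x90000-0xa0000
 * is handed back by free_memmap().
 */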

#ifdef CONFIG_HIGHMEM
static inline void free_area_high(unsigned long pfn, unsigned long end)
{
        for (; pfn < end; pfn++)
                free_highmem_page(pfn_to_page(pfn));
}
#endif

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long max_low = max_low_pfn;
        struct memblock_region *mem, *res;

        /* set highmem page free */
        for_each_memblock(memory, mem) {
                unsigned long start = memblock_region_memory_base_pfn(mem);
                unsigned long end = memblock_region_memory_end_pfn(mem);

                /* Ignore complete lowmem entries */
                if (end <= max_low)
                        continue;

                if (memblock_is_nomap(mem))
                        continue;

                /* Truncate partial highmem entries */
                if (start < max_low)
                        start = max_low;

                /* Find and exclude any reserved regions */
                for_each_memblock(reserved, res) {
                        unsigned long res_start, res_end;

                        res_start = memblock_region_reserved_base_pfn(res);
                        res_end = memblock_region_reserved_end_pfn(res);

                        if (res_end < start)
                                continue;
                        if (res_start < start)
                                res_start = start;
                        if (res_start > end)
                                res_start = end;
                        if (res_end > end)
                                res_end = end;
                        if (res_start != start)
                                free_area_high(start, res_start);
                        start = res_end;
                        if (start == end)
                                break;
                }

                /* And now free anything which remains */
                if (start < end)
                        free_area_high(start, end);
        }
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
        swiotlb_init(1);
#endif

        set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

        /* this will put all unused low memory onto the freelists */
        free_unused_memmap();
        memblock_free_all();

#ifdef CONFIG_SA1111
        /* now that our DMA memory is actually so designated, we can free it */
        free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

        free_highpages();

        mem_init_print_info(NULL);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#ifdef CONFIG_MMU
        BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
        BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
        BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
        const char *name;
        unsigned long start;
        unsigned long end;
        pmdval_t mask;
        pmdval_t prot;
        pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
        /* Make page tables, etc. before _stext RW (set NX). */
        {
                .name = "pre-text NX",
                .start = PAGE_OFFSET,
                .end = (unsigned long)_stext,
                .mask = ~PMD_SECT_XN,
                .prot = PMD_SECT_XN,
        },
        /* Make init RW (set NX). */
        {
                .name = "init NX",
                .start = (unsigned long)__init_begin,
                .end = (unsigned long)_sdata,
                .mask = ~PMD_SECT_XN,
                .prot = PMD_SECT_XN,
        },
        /* Make rodata NX (set RO in ro_perms below). */
        {
                .name = "rodata NX",
                .start = (unsigned long)__start_rodata_section_aligned,
                .end = (unsigned long)__init_begin,
                .mask = ~PMD_SECT_XN,
                .prot = PMD_SECT_XN,
        },
};

static struct section_perm ro_perms[] = {
        /* Make kernel code and rodata RX (set RO). */
        {
                .name = "text/rodata RO",
                .start = (unsigned long)_stext,
                .end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
                .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
                .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
                .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
                .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
                .clear = PMD_SECT_AP_WRITE,
#endif
        },
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm).  During startup, this is the init_mm.  It is
 * only safe to call this with preemption disabled, as under
 * stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
                                  pmdval_t prot, struct mm_struct *mm)
{
        pmd_t *pmd;

        pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
        pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
        if (addr & SECTION_SIZE)
                pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
        else
                pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
        flush_pmd_entry(pmd);
        local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}
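
/*
 * On classic (non-LPAE) ARM a Linux pmd is a pair of 1MiB section
 * entries covering 2MiB, which is why "addr & SECTION_SIZE" selects
 * pmd[0] or pmd[1] above; LPAE has a single entry per 2MiB section.
 */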

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
        if (cpu_architecture() < CPU_ARCH_ARMv6)
                return false;

        return !!(get_cr() & CR_XP);
}
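
/*
 * CR_XP is the SCTLR "extended page tables" bit: without it the
 * ARMv6+ AP[2]/XN permission encodings used above are not in effect,
 * so the strict-permission updates would be meaningless.
 */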

void set_section_perms(struct section_perm *perms, int n, bool set,
                       struct mm_struct *mm)
{
        size_t i;
        unsigned long addr;

        if (!arch_has_strict_perms())
                return;

        for (i = 0; i < n; i++) {
                if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
                    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
                        pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
                               perms[i].name, perms[i].start, perms[i].end,
                               SECTION_SIZE);
                        continue;
                }

                for (addr = perms[i].start;
                     addr < perms[i].end;
                     addr += SECTION_SIZE)
                        section_update(addr, perms[i].mask,
                                       set ? perms[i].prot : perms[i].clear, mm);
        }
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine() framework, executed by a single CPU while all other
 * CPUs spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
        struct task_struct *t, *s;

        for_each_process(t) {
                if (t->flags & PF_KTHREAD)
                        continue;
                for_each_thread(t, s)
                        if (s->mm)
                                set_section_perms(perms, n, true, s->mm);
        }
        set_section_perms(perms, n, true, current->active_mm);
        set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
        update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
        return 0;
}

static void fix_kernmem_perms(void)
{
        stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
        update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
        return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
        kernel_set_to_readonly = 1;
        stop_machine(__mark_rodata_ro, NULL, NULL);
        debug_checkwx();
}

void set_kernel_text_rw(void)
{
        if (!kernel_set_to_readonly)
                return;

        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
                          current->active_mm);
}

void set_kernel_text_ro(void)
{
        if (!kernel_set_to_readonly)
                return;

        set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
                          current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
        fix_kernmem_perms();

        poison_init_mem(__init_begin, __init_end - __init_begin);
        if (!machine_is_integrator() && !machine_is_cintegrator())
                free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
        if (start == initrd_start)
                start = round_down(start, PAGE_SIZE);
        if (end == initrd_end)
                end = round_up(end, PAGE_SIZE);

        poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
        free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif