1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995 Linus Torvalds
7 * Copyright (C) 1995 Waldorf Electronics
8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
9 * Copyright (C) 1996 Stoned Elipot
10 * Copyright (C) 1999 Silicon Graphics, Inc.
11 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
12 */
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/export.h>
16#include <linux/screen_info.h>
17#include <linux/memblock.h>
18#include <linux/bootmem.h>
19#include <linux/initrd.h>
20#include <linux/root_dev.h>
21#include <linux/highmem.h>
22#include <linux/console.h>
23#include <linux/pfn.h>
24#include <linux/debugfs.h>
25#include <linux/kexec.h>
26#include <linux/sizes.h>
27#include <linux/device.h>
28#include <linux/dma-contiguous.h>
29
30#include <asm/addrspace.h>
31#include <asm/bootinfo.h>
32#include <asm/bugs.h>
33#include <asm/cache.h>
34#include <asm/cdmm.h>
35#include <asm/cpu.h>
36#include <asm/debug.h>
37#include <asm/sections.h>
38#include <asm/setup.h>
39#include <asm/smp-ops.h>
40#include <asm/prom.h>
41
42#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
43const char __section(.appended_dtb) __appended_dtb[0x100000];
44#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
45
46struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
47
48EXPORT_SYMBOL(cpu_data);
49
50#ifdef CONFIG_VT
51struct screen_info screen_info;
52#endif
53
54/*
55 * Despite its name this variable is used even if we don't have PCI
56 */
57unsigned int PCI_DMA_BUS_IS_PHYS;
58
59EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
60
61/*
62 * Setup information
63 *
64 * These are initialized so they are in the .data section
65 */
66unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
67
68EXPORT_SYMBOL(mips_machtype);
69
70struct boot_mem_map boot_mem_map;
71
72static char __initdata command_line[COMMAND_LINE_SIZE];
73char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
74
75#ifdef CONFIG_CMDLINE_BOOL
76static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
77#endif
78
79/*
80 * mips_io_port_base is the beginning of the address space to which x86 style
81 * I/O ports are mapped.
82 */
83const unsigned long mips_io_port_base = -1;
84EXPORT_SYMBOL(mips_io_port_base);
85
86static struct resource code_resource = { .name = "Kernel code", };
87static struct resource data_resource = { .name = "Kernel data", };
88
89static void *detect_magic __initdata = detect_memory_region;
90
91void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
92{
93 int x = boot_mem_map.nr_map;
94 int i;
95
96 /* Sanity check */
97 if (start + size < start) {
98 pr_warn("Trying to add an invalid memory region, skipped\n");
99 return;
100 }
101
102 /*
103 * Try to merge with existing entry, if any.
104 */
105 for (i = 0; i < boot_mem_map.nr_map; i++) {
106 struct boot_mem_map_entry *entry = boot_mem_map.map + i;
107 unsigned long top;
108
109 if (entry->type != type)
110 continue;
111
112 if (start + size < entry->addr)
113 continue; /* no overlap */
114
115 if (entry->addr + entry->size < start)
116 continue; /* no overlap */
117
118 top = max(entry->addr + entry->size, start + size);
119 entry->addr = min(entry->addr, start);
120 entry->size = top - entry->addr;
121
122 return;
123 }
124
125 if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
126 pr_err("Ooops! Too many entries in the memory map!\n");
127 return;
128 }
129
130 boot_mem_map.map[x].addr = start;
131 boot_mem_map.map[x].size = size;
132 boot_mem_map.map[x].type = type;
133 boot_mem_map.nr_map++;
134}
135
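/*
 * Probe the installed RAM size: detect_magic holds a known pointer value,
 * and on boards where physical memory aliases (wraps around) at its size
 * boundary, the first power-of-two offset at which that value reappears
 * marks the top of RAM.
 */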
136void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
137{
138 void *dm = &detect_magic;
139 phys_addr_t size;
140
141 for (size = sz_min; size < sz_max; size <<= 1) {
142 if (!memcmp(dm, dm + size, sizeof(detect_magic)))
143 break;
144 }
145
146 pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
147 ((unsigned long long) size) / SZ_1M,
148 (unsigned long long) start,
149 ((unsigned long long) sz_min) / SZ_1M,
150 ((unsigned long long) sz_max) / SZ_1M);
151
152 add_memory_region(start, size, BOOT_MEM_RAM);
153}
154
155static void __init print_memory_map(void)
156{
157 int i;
158 const int field = 2 * sizeof(unsigned long);
159
160 for (i = 0; i < boot_mem_map.nr_map; i++) {
161 printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
162 field, (unsigned long long) boot_mem_map.map[i].size,
163 field, (unsigned long long) boot_mem_map.map[i].addr);
164
165 switch (boot_mem_map.map[i].type) {
166 case BOOT_MEM_RAM:
167 printk(KERN_CONT "(usable)\n");
168 break;
169 case BOOT_MEM_INIT_RAM:
170 printk(KERN_CONT "(usable after init)\n");
171 break;
172 case BOOT_MEM_ROM_DATA:
173 printk(KERN_CONT "(ROM data)\n");
174 break;
175 case BOOT_MEM_RESERVED:
176 printk(KERN_CONT "(reserved)\n");
177 break;
178 default:
179 printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
180 break;
181 }
182 }
183}
184
185/*
186 * Manage initrd
187 */
188#ifdef CONFIG_BLK_DEV_INITRD
189
190static int __init rd_start_early(char *p)
191{
192 unsigned long start = memparse(p, &p);
193
194#ifdef CONFIG_64BIT
195 /* Guess if the sign extension was forgotten by bootloader */
196 if (start < XKPHYS)
197 start = (int)start;
198#endif
199 initrd_start = start;
200 initrd_end += start;
201 return 0;
202}
203early_param("rd_start", rd_start_early);
204
205static int __init rd_size_early(char *p)
206{
207 initrd_end += memparse(p, &p);
208 return 0;
209}
210early_param("rd_size", rd_size_early);
211
212/* it returns the next free pfn after initrd */
213static unsigned long __init init_initrd(void)
214{
215 unsigned long end;
216
217 /*
218 * Board specific code or command line parser should have
219 * already set up initrd_start and initrd_end. In these cases
220 * perform sanity checks and use them if all looks good.
221 */
222 if (!initrd_start || initrd_end <= initrd_start)
223 goto disable;
224
225 if (initrd_start & ~PAGE_MASK) {
226 pr_err("initrd start must be page aligned\n");
227 goto disable;
228 }
229 if (initrd_start < PAGE_OFFSET) {
230 pr_err("initrd start < PAGE_OFFSET\n");
231 goto disable;
232 }
233
234 /*
235 * Sanitize initrd addresses. For example, firmware
236 * can't guess whether it needs to pass them as
237 * 64-bit values if the kernel has been built as pure
238 * 32-bit. We also need to switch from KSEG0 to XKPHYS
239 * addresses now, so the code can safely use __pa().
240 */
241 end = __pa(initrd_end);
242 initrd_end = (unsigned long)__va(end);
243 initrd_start = (unsigned long)__va(__pa(initrd_start));
244
245 ROOT_DEV = Root_RAM0;
246 return PFN_UP(end);
247disable:
248 initrd_start = 0;
249 initrd_end = 0;
250 return 0;
251}
252
253static void __init finalize_initrd(void)
254{
255 unsigned long size = initrd_end - initrd_start;
256
257 if (size == 0) {
258 printk(KERN_INFO "Initrd not found or empty");
259 goto disable;
260 }
261 if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
262 printk(KERN_ERR "Initrd extends beyond end of memory");
263 goto disable;
264 }
265
266 reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
267 initrd_below_start_ok = 1;
268
269 pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
270 initrd_start, size);
271 return;
272disable:
273 printk(KERN_CONT " - disabling initrd\n");
274 initrd_start = 0;
275 initrd_end = 0;
276}
277
278#else /* !CONFIG_BLK_DEV_INITRD */
279
280static unsigned long __init init_initrd(void)
281{
282 return 0;
283}
284
285#define finalize_initrd() do {} while (0)
286
287#endif
288
289/*
290 * Initialize the bootmem allocator. It also sets up initrd-related data
291 * if needed.
292 */
293#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
294
295static void __init bootmem_init(void)
296{
297 init_initrd();
298 finalize_initrd();
299}
300
301#else /* !CONFIG_SGI_IP27 */
302
303static void __init bootmem_init(void)
304{
305 unsigned long reserved_end;
306 unsigned long mapstart = ~0UL;
307 unsigned long bootmap_size;
308 int i;
309
310 /*
311 * Sanity check any INITRD first. We don't take it into account
312 * for bootmem setup initially, relying on the end of the kernel code
313 * as our memory range starting point. Once bootmem is initialized we
314 * will reserve the area used for the initrd.
315 */
316 init_initrd();
317 reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));
318
319 /*
320 * max_low_pfn is not a number of pages. The number of pages
321 * of the system is given by 'max_low_pfn - min_low_pfn'.
322 */
323 min_low_pfn = ~0UL;
324 max_low_pfn = 0;
325
326 /*
327 * Find the highest page frame number we have available.
328 */
329 for (i = 0; i < boot_mem_map.nr_map; i++) {
330 unsigned long start, end;
331
332 if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
333 continue;
334
335 start = PFN_UP(boot_mem_map.map[i].addr);
336 end = PFN_DOWN(boot_mem_map.map[i].addr
337 + boot_mem_map.map[i].size);
338
339 if (end > max_low_pfn)
340 max_low_pfn = end;
341 if (start < min_low_pfn)
342 min_low_pfn = start;
343 if (end <= reserved_end)
344 continue;
345#ifdef CONFIG_BLK_DEV_INITRD
346 /* Skip zones before initrd and initrd itself */
347 if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
348 continue;
349#endif
350 if (start >= mapstart)
351 continue;
352 mapstart = max(reserved_end, start);
353 }
354
355 if (min_low_pfn >= max_low_pfn)
356 panic("Incorrect memory mapping !!!");
357 if (min_low_pfn > ARCH_PFN_OFFSET) {
358 pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
359 (min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
360 min_low_pfn - ARCH_PFN_OFFSET);
361 } else if (min_low_pfn < ARCH_PFN_OFFSET) {
362 pr_info("%lu free pages won't be used\n",
363 ARCH_PFN_OFFSET - min_low_pfn);
364 }
365 min_low_pfn = ARCH_PFN_OFFSET;
366
367 /*
368 * Determine low and high memory ranges
369 */
370 max_pfn = max_low_pfn;
371 if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
372#ifdef CONFIG_HIGHMEM
373 highstart_pfn = PFN_DOWN(HIGHMEM_START);
374 highend_pfn = max_low_pfn;
375#endif
376 max_low_pfn = PFN_DOWN(HIGHMEM_START);
377 }
378
379#ifdef CONFIG_BLK_DEV_INITRD
380 /*
381 * mapstart should be after initrd_end
382 */
383 if (initrd_end)
384 mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
385#endif
386
387 /*
388 * Initialize the boot-time allocator with low memory only.
389 */
390 bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
391 min_low_pfn, max_low_pfn);
392
393
394 for (i = 0; i < boot_mem_map.nr_map; i++) {
395 unsigned long start, end;
396
397 start = PFN_UP(boot_mem_map.map[i].addr);
398 end = PFN_DOWN(boot_mem_map.map[i].addr
399 + boot_mem_map.map[i].size);
400
401 if (start <= min_low_pfn)
402 start = min_low_pfn;
403 if (start >= end)
404 continue;
405
406#ifndef CONFIG_HIGHMEM
407 if (end > max_low_pfn)
408 end = max_low_pfn;
409
410 /*
411 * ... finally, is the area going away?
412 */
413 if (end <= start)
414 continue;
415#endif
416
417 memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
418 }
419
420 /*
421 * Register fully available low RAM pages with the bootmem allocator.
422 */
423 for (i = 0; i < boot_mem_map.nr_map; i++) {
424 unsigned long start, end, size;
425
426 start = PFN_UP(boot_mem_map.map[i].addr);
427 end = PFN_DOWN(boot_mem_map.map[i].addr
428 + boot_mem_map.map[i].size);
429
430 /*
431 * Reserve usable memory.
432 */
433 switch (boot_mem_map.map[i].type) {
434 case BOOT_MEM_RAM:
435 break;
436 case BOOT_MEM_INIT_RAM:
437 memory_present(0, start, end);
438 continue;
439 default:
440 /* Not usable memory */
441 continue;
442 }
443
444 /*
445 * We round the start address of usable memory up
446 * and the end of the usable range down.
447 */
448 if (start >= max_low_pfn)
449 continue;
450 if (start < reserved_end)
451 start = reserved_end;
452 if (end > max_low_pfn)
453 end = max_low_pfn;
454
455 /*
456 * ... finally, is the area going away?
457 */
458 if (end <= start)
459 continue;
460 size = end - start;
461
462 /* Register lowmem ranges */
463 free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
464 memory_present(0, start, end);
465 }
466
467 /*
468 * Reserve the bootmap memory.
469 */
470 reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
471
472 /*
473 * Reserve initrd memory if needed.
474 */
475 finalize_initrd();
476}
477
478#endif /* CONFIG_SGI_IP27 */
479
480/*
481 * arch_mem_init - initialize memory management subsystem
482 *
483 * o plat_mem_setup() detects the memory configuration and will record detected
484 * memory areas using add_memory_region.
485 *
486 * At this stage the memory configuration of the system is known to the
487 * kernel but the generic memory management system is still entirely uninitialized.
488 *
489 * o bootmem_init()
490 * o sparse_init()
491 * o paging_init()
492 * o dma_contiguous_reserve()
493 *
494 * At this stage the bootmem allocator is ready to use.
495 *
496 * NOTE: historically plat_mem_setup did the entire platform initialization.
497 * This was rather impractical because it meant plat_mem_setup had to
498 * get by without any kind of memory allocator. To keep old code from
499 * breaking, plat_setup was simply renamed to plat_mem_setup and a second platform
500 * initialization hook for anything else was introduced.
501 */
502
503static int usermem __initdata;
504
505static int __init early_parse_mem(char *p)
506{
507 phys_addr_t start, size;
508
509 /*
510 * If a user specifies memory size, we
511 * blow away any automatically generated
512 * size.
513 */
514 if (usermem == 0) {
515 boot_mem_map.nr_map = 0;
516 usermem = 1;
517 }
518 start = 0;
519 size = memparse(p, &p);
520 if (*p == '@')
521 start = memparse(p + 1, &p);
522
523 add_memory_region(start, size, BOOT_MEM_RAM);
524 return 0;
525}
526early_param("mem", early_parse_mem);
527
528#ifdef CONFIG_PROC_VMCORE
529unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
530static int __init early_parse_elfcorehdr(char *p)
531{
532 int i;
533
534 setup_elfcorehdr = memparse(p, &p);
535
536 for (i = 0; i < boot_mem_map.nr_map; i++) {
537 unsigned long start = boot_mem_map.map[i].addr;
538 unsigned long end = (boot_mem_map.map[i].addr +
539 boot_mem_map.map[i].size);
540 if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
541 /*
542 * Reserve from the elf core header to the end of
543 * the memory segment; that should all be kdump
544 * reserved memory.
545 */
546 setup_elfcorehdr_size = end - setup_elfcorehdr;
547 break;
548 }
549 }
550 /*
551 * If we don't find it in the memory map, then we shouldn't
552 * have to worry about it, as the new kernel won't use it.
553 */
554 return 0;
555}
556early_param("elfcorehdr", early_parse_elfcorehdr);
557#endif
558
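/*
 * Add the range [mem, end) to the boot memory map with the given type,
 * unless its start already falls inside an existing entry.
 */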
559static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
560{
561 phys_addr_t size;
562 int i;
563
564 size = end - mem;
565 if (!size)
566 return;
567
568 /* Make sure it is in the boot_mem_map */
569 for (i = 0; i < boot_mem_map.nr_map; i++) {
570 if (mem >= boot_mem_map.map[i].addr &&
571 mem < (boot_mem_map.map[i].addr +
572 boot_mem_map.map[i].size))
573 return;
574 }
575 add_memory_region(mem, size, type);
576}
577
578#ifdef CONFIG_KEXEC
579static inline unsigned long long get_total_mem(void)
580{
581 unsigned long long total;
582
583 total = max_pfn - min_low_pfn;
584 return total << PAGE_SHIFT;
585}
586
587static void __init mips_parse_crashkernel(void)
588{
589 unsigned long long total_mem;
590 unsigned long long crash_size, crash_base;
591 int ret;
592
593 total_mem = get_total_mem();
594 ret = parse_crashkernel(boot_command_line, total_mem,
595 &crash_size, &crash_base);
596 if (ret != 0 || crash_size <= 0)
597 return;
598
599 crashk_res.start = crash_base;
600 crashk_res.end = crash_base + crash_size - 1;
601}
602
603static void __init request_crashkernel(struct resource *res)
604{
605 int ret;
606
607 ret = request_resource(res, &crashk_res);
608 if (!ret)
609 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
610 (unsigned long)((crashk_res.end -
611 crashk_res.start + 1) >> 20),
612 (unsigned long)(crashk_res.start >> 20));
613}
614#else /* !defined(CONFIG_KEXEC) */
615static void __init mips_parse_crashkernel(void)
616{
617}
618
619static void __init request_crashkernel(struct resource *res)
620{
621}
622#endif /* !defined(CONFIG_KEXEC) */
623
624#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
625#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
626#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
627
628static void __init arch_mem_init(char **cmdline_p)
629{
630 struct memblock_region *reg;
631 extern void plat_mem_setup(void);
632
633 /* call board setup routine */
634 plat_mem_setup();
635
636 /*
637 * Make sure all kernel memory is in the maps. The "UP" and
638 * "DOWN" are opposite for initdata since if it crosses over
639 * into another memory section you don't want that to be
640 * freed when the initdata is freed.
641 */
642 arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
643 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
644 BOOT_MEM_RAM);
645 arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
646 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
647 BOOT_MEM_INIT_RAM);
648
649 pr_info("Determined physical RAM map:\n");
650 print_memory_map();
651
652#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
653 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
654#else
655 if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
656 (USE_DTB_CMDLINE && !boot_command_line[0]))
657 strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
658
659 if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
660 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
661 strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
662 }
663
664#if defined(CONFIG_CMDLINE_BOOL)
665 if (builtin_cmdline[0]) {
666 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
667 strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
668 }
669#endif
670#endif
671 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
672
673 *cmdline_p = command_line;
674
675 parse_early_param();
676
677 if (usermem) {
678 pr_info("User-defined physical RAM map:\n");
679 print_memory_map();
680 }
681
682 bootmem_init();
683#ifdef CONFIG_PROC_VMCORE
684 if (setup_elfcorehdr && setup_elfcorehdr_size) {
685 printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
686 setup_elfcorehdr, setup_elfcorehdr_size);
687 reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
688 BOOTMEM_DEFAULT);
689 }
690#endif
691
692 mips_parse_crashkernel();
693#ifdef CONFIG_KEXEC
694 if (crashk_res.start != crashk_res.end)
695 reserve_bootmem(crashk_res.start,
696 crashk_res.end - crashk_res.start + 1,
697 BOOTMEM_DEFAULT);
698#endif
699 device_tree_init();
700 sparse_init();
701 plat_swiotlb_setup();
702 paging_init();
703
704 dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
705 /* Tell bootmem about cma reserved memblock section */
706 for_each_memblock(reserved, reg)
707 if (reg->size != 0)
708 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
709}
710
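/*
 * Describe the boot memory map to the resource framework: every low-memory
 * entry becomes an iomem resource (named "System RAM" or "reserved"), with
 * the kernel code/data and any crash kernel range nested inside the region
 * that contains them.
 */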
711static void __init resource_init(void)
712{
713 int i;
714
715 if (UNCAC_BASE != IO_BASE)
716 return;
717
718 code_resource.start = __pa_symbol(&_text);
719 code_resource.end = __pa_symbol(&_etext) - 1;
720 data_resource.start = __pa_symbol(&_etext);
721 data_resource.end = __pa_symbol(&_edata) - 1;
722
723 for (i = 0; i < boot_mem_map.nr_map; i++) {
724 struct resource *res;
725 unsigned long start, end;
726
727 start = boot_mem_map.map[i].addr;
728 end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
729 if (start >= HIGHMEM_START)
730 continue;
731 if (end >= HIGHMEM_START)
732 end = HIGHMEM_START - 1;
733
734 res = alloc_bootmem(sizeof(struct resource));
735
736 res->start = start;
737 res->end = end;
738 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
739
740 switch (boot_mem_map.map[i].type) {
741 case BOOT_MEM_RAM:
742 case BOOT_MEM_INIT_RAM:
743 case BOOT_MEM_ROM_DATA:
744 res->name = "System RAM";
745 res->flags |= IORESOURCE_SYSRAM;
746 break;
747 case BOOT_MEM_RESERVED:
748 default:
749 res->name = "reserved";
750 }
751
752 request_resource(&iomem_resource, res);
753
754 /*
755 * We don't know which RAM region contains kernel data,
756 * so we try it repeatedly and let the resource manager
757 * test it.
758 */
759 request_resource(res, &code_resource);
760 request_resource(res, &data_resource);
761 request_crashkernel(res);
762 }
763}
764
765#ifdef CONFIG_SMP
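/*
 * Mark CPUs 0..possible-1 as possible, clear the rest, and clamp
 * nr_cpu_ids to the number of possible CPUs.
 */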
766static void __init prefill_possible_map(void)
767{
768 int i, possible = num_possible_cpus();
769
770 if (possible > nr_cpu_ids)
771 possible = nr_cpu_ids;
772
773 for (i = 0; i < possible; i++)
774 set_cpu_possible(i, true);
775 for (; i < NR_CPUS; i++)
776 set_cpu_possible(i, false);
777
778 nr_cpu_ids = possible;
779}
780#else
781static inline void prefill_possible_map(void) {}
782#endif
783
784void __init setup_arch(char **cmdline_p)
785{
786 cpu_probe();
787 mips_cm_probe();
788 prom_init();
789
790 setup_early_fdc_console();
791#ifdef CONFIG_EARLY_PRINTK
792 setup_early_printk();
793#endif
794 cpu_report();
795 check_bugs_early();
796
797#if defined(CONFIG_VT)
798#if defined(CONFIG_VGA_CONSOLE)
799 conswitchp = &vga_con;
800#elif defined(CONFIG_DUMMY_CONSOLE)
801 conswitchp = &dummy_con;
802#endif
803#endif
804
805 arch_mem_init(cmdline_p);
806
807 resource_init();
808 plat_smp_setup();
809 prefill_possible_map();
810
811 cpu_cache_init();
812}
813
814unsigned long kernelsp[NR_CPUS];
815unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
816
817#ifdef CONFIG_DEBUG_FS
818struct dentry *mips_debugfs_dir;
819static int __init debugfs_mips(void)
820{
821 struct dentry *d;
822
823 d = debugfs_create_dir("mips", NULL);
824 if (!d)
825 return -ENOMEM;
826 mips_debugfs_dir = d;
827 return 0;
828}
829arch_initcall(debugfs_mips);
830#endif
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 1995 Linus Torvalds
7 * Copyright (C) 1995 Waldorf Electronics
8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
9 * Copyright (C) 1996 Stoned Elipot
10 * Copyright (C) 1999 Silicon Graphics, Inc.
11 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
12 */
13#include <linux/init.h>
14#include <linux/ioport.h>
15#include <linux/export.h>
16#include <linux/screen_info.h>
17#include <linux/memblock.h>
18#include <linux/initrd.h>
19#include <linux/root_dev.h>
20#include <linux/highmem.h>
21#include <linux/console.h>
22#include <linux/pfn.h>
23#include <linux/debugfs.h>
24#include <linux/kexec.h>
25#include <linux/sizes.h>
26#include <linux/device.h>
27#include <linux/dma-contiguous.h>
28#include <linux/decompress/generic.h>
29#include <linux/of_fdt.h>
30#include <linux/of_reserved_mem.h>
31#include <linux/dmi.h>
32
33#include <asm/addrspace.h>
34#include <asm/bootinfo.h>
35#include <asm/bugs.h>
36#include <asm/cache.h>
37#include <asm/cdmm.h>
38#include <asm/cpu.h>
39#include <asm/debug.h>
40#include <asm/dma-coherence.h>
41#include <asm/sections.h>
42#include <asm/setup.h>
43#include <asm/smp-ops.h>
44#include <asm/prom.h>
45
46#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
47const char __section(.appended_dtb) __appended_dtb[0x100000];
48#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
49
50struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
51
52EXPORT_SYMBOL(cpu_data);
53
54#ifdef CONFIG_VT
55struct screen_info screen_info;
56#endif
57
58/*
59 * Setup information
60 *
61 * These are initialized so they are in the .data section
62 */
63unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
64
65EXPORT_SYMBOL(mips_machtype);
66
67static char __initdata command_line[COMMAND_LINE_SIZE];
68char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
69
70#ifdef CONFIG_CMDLINE_BOOL
71static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
72#else
73static const char builtin_cmdline[] __initconst = "";
74#endif
75
76/*
77 * mips_io_port_base is the beginning of the address space to which x86 style
78 * I/O ports are mapped.
79 */
80unsigned long mips_io_port_base = -1;
81EXPORT_SYMBOL(mips_io_port_base);
82
83static struct resource code_resource = { .name = "Kernel code", };
84static struct resource data_resource = { .name = "Kernel data", };
85static struct resource bss_resource = { .name = "Kernel bss", };
86
87static void *detect_magic __initdata = detect_memory_region;
88
89#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
90unsigned long ARCH_PFN_OFFSET;
91EXPORT_SYMBOL(ARCH_PFN_OFFSET);
92#endif
93
94void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
95{
96 /*
97 * Note: This function only exists for historical reasons;
98 * new code should use memblock_add or memblock_add_node instead.
99 */
100
101 /*
102 * If the region reaches the top of the physical address space, adjust
103 * the size slightly so that (start + size) doesn't overflow
104 */
105 if (start + size - 1 == PHYS_ADDR_MAX)
106 --size;
107
108 /* Sanity check */
109 if (start + size < start) {
110 pr_warn("Trying to add an invalid memory region, skipped\n");
111 return;
112 }
113
114 if (start < PHYS_OFFSET)
115 return;
116
117 memblock_add(start, size);
118 /* Reserve any memory except the ordinary RAM ranges. */
119 switch (type) {
120 case BOOT_MEM_RAM:
121 break;
122
123 case BOOT_MEM_NOMAP: /* Discard the range from the system. */
124 memblock_remove(start, size);
125 break;
126
127 default: /* Reserve the rest of the memory types at boot time */
128 memblock_reserve(start, size);
129 break;
130 }
131}
132
133void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
134{
135 void *dm = &detect_magic;
136 phys_addr_t size;
137
138 for (size = sz_min; size < sz_max; size <<= 1) {
139 if (!memcmp(dm, dm + size, sizeof(detect_magic)))
140 break;
141 }
142
143 pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
144 ((unsigned long long) size) / SZ_1M,
145 (unsigned long long) start,
146 ((unsigned long long) sz_min) / SZ_1M,
147 ((unsigned long long) sz_max) / SZ_1M);
148
149 add_memory_region(start, size, BOOT_MEM_RAM);
150}
151
152/*
153 * Manage initrd
154 */
155#ifdef CONFIG_BLK_DEV_INITRD
156
157static int __init rd_start_early(char *p)
158{
159 unsigned long start = memparse(p, &p);
160
161#ifdef CONFIG_64BIT
162 /* Guess if the sign extension was forgotten by bootloader */
163 if (start < XKPHYS)
164 start = (int)start;
165#endif
166 initrd_start = start;
167 initrd_end += start;
168 return 0;
169}
170early_param("rd_start", rd_start_early);
171
172static int __init rd_size_early(char *p)
173{
174 initrd_end += memparse(p, &p);
175 return 0;
176}
177early_param("rd_size", rd_size_early);
178
179/* it returns the next free pfn after initrd */
180static unsigned long __init init_initrd(void)
181{
182 unsigned long end;
183
184 /*
185 * Board specific code or command line parser should have
186 * already set up initrd_start and initrd_end. In these cases
187 * perform sanity checks and use them if all looks good.
188 */
189 if (!initrd_start || initrd_end <= initrd_start)
190 goto disable;
191
192 if (initrd_start & ~PAGE_MASK) {
193 pr_err("initrd start must be page aligned\n");
194 goto disable;
195 }
196 if (initrd_start < PAGE_OFFSET) {
197 pr_err("initrd start < PAGE_OFFSET\n");
198 goto disable;
199 }
200
201 /*
202 * Sanitize initrd addresses. For example, firmware
203 * can't guess whether it needs to pass them as
204 * 64-bit values if the kernel has been built as pure
205 * 32-bit. We also need to switch from KSEG0 to XKPHYS
206 * addresses now, so the code can safely use __pa().
207 */
208 end = __pa(initrd_end);
209 initrd_end = (unsigned long)__va(end);
210 initrd_start = (unsigned long)__va(__pa(initrd_start));
211
212 ROOT_DEV = Root_RAM0;
213 return PFN_UP(end);
214disable:
215 initrd_start = 0;
216 initrd_end = 0;
217 return 0;
218}
219
220/* In some conditions (e.g. big endian bootloader with a little endian
221 kernel), the initrd might appear byte swapped. Try to detect this and
222 byte swap it if needed. */
223static void __init maybe_bswap_initrd(void)
224{
225#if defined(CONFIG_CPU_CAVIUM_OCTEON)
226 u64 buf;
227
228 /* Check for CPIO signature */
229 if (!memcmp((void *)initrd_start, "070701", 6))
230 return;
231
232 /* Check for compressed initrd */
233 if (decompress_method((unsigned char *)initrd_start, 8, NULL))
234 return;
235
236 /* Try again with a byte swapped header */
237 buf = swab64p((u64 *)initrd_start);
238 if (!memcmp(&buf, "070701", 6) ||
239 decompress_method((unsigned char *)(&buf), 8, NULL)) {
240 unsigned long i;
241
242 pr_info("Byteswapped initrd detected\n");
243 for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
244 swab64s((u64 *)i);
245 }
246#endif
247}
248
249static void __init finalize_initrd(void)
250{
251 unsigned long size = initrd_end - initrd_start;
252
253 if (size == 0) {
254 printk(KERN_INFO "Initrd not found or empty");
255 goto disable;
256 }
257 if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
258 printk(KERN_ERR "Initrd extends beyond end of memory");
259 goto disable;
260 }
261
262 maybe_bswap_initrd();
263
264 memblock_reserve(__pa(initrd_start), size);
265 initrd_below_start_ok = 1;
266
267 pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
268 initrd_start, size);
269 return;
270disable:
271 printk(KERN_CONT " - disabling initrd\n");
272 initrd_start = 0;
273 initrd_end = 0;
274}
275
276#else /* !CONFIG_BLK_DEV_INITRD */
277
278static unsigned long __init init_initrd(void)
279{
280 return 0;
281}
282
283#define finalize_initrd() do {} while (0)
284
285#endif
286
287/*
288 * Initialize the bootmem allocator. It also sets up initrd-related data
289 * if needed.
290 */
291#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
292
293static void __init bootmem_init(void)
294{
295 init_initrd();
296 finalize_initrd();
297}
298
299#else /* !CONFIG_SGI_IP27 */
300
301static void __init bootmem_init(void)
302{
303 struct memblock_region *mem;
304 phys_addr_t ramstart, ramend;
305
306 ramstart = memblock_start_of_DRAM();
307 ramend = memblock_end_of_DRAM();
308
309 /*
310 * Sanity check any INITRD first. We don't take it into account
311 * for bootmem setup initially, relying on the end of the kernel code
312 * as our memory range starting point. Once bootmem is initialized we
313 * will reserve the area used for the initrd.
314 */
315 init_initrd();
316
317 /* Reserve memory occupied by kernel. */
318 memblock_reserve(__pa_symbol(&_text),
319 __pa_symbol(&_end) - __pa_symbol(&_text));
320
321 /* max_low_pfn is not a number of pages but the end pfn of low mem */
322
323#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
324 ARCH_PFN_OFFSET = PFN_UP(ramstart);
325#else
326 /*
327 * Reserve any memory between the start of RAM and PHYS_OFFSET
328 */
329 if (ramstart > PHYS_OFFSET)
330 memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
331
332 if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
333 pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
334 (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
335 (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
336 }
337#endif
338
339 min_low_pfn = ARCH_PFN_OFFSET;
340 max_pfn = PFN_DOWN(ramend);
341 for_each_memblock(memory, mem) {
342 unsigned long start = memblock_region_memory_base_pfn(mem);
343 unsigned long end = memblock_region_memory_end_pfn(mem);
344
345 /*
346 * Skip highmem here so we get an accurate max_low_pfn if low
347 * memory stops short of high memory.
348 * If the region overlaps HIGHMEM_START, end is clipped so
349 * max_low_pfn excludes the highmem portion.
350 */
351 if (memblock_is_nomap(mem))
352 continue;
353 if (start >= PFN_DOWN(HIGHMEM_START))
354 continue;
355 if (end > PFN_DOWN(HIGHMEM_START))
356 end = PFN_DOWN(HIGHMEM_START);
357 if (end > max_low_pfn)
358 max_low_pfn = end;
359 }
360
361 if (min_low_pfn >= max_low_pfn)
362 panic("Incorrect memory mapping !!!");
363
364 if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
365#ifdef CONFIG_HIGHMEM
366 highstart_pfn = PFN_DOWN(HIGHMEM_START);
367 highend_pfn = max_pfn;
368#else
369 max_low_pfn = PFN_DOWN(HIGHMEM_START);
370 max_pfn = max_low_pfn;
371#endif
372 }
373
374 /*
375 * Reserve initrd memory if needed.
376 */
377 finalize_initrd();
378}
379
380#endif /* CONFIG_SGI_IP27 */
381
382static int usermem __initdata;
383
384static int __init early_parse_mem(char *p)
385{
386 phys_addr_t start, size;
387
388 /*
389 * If a user specifies memory size, we
390 * blow away any automatically generated
391 * size.
392 */
393 if (usermem == 0) {
394 usermem = 1;
395 memblock_remove(memblock_start_of_DRAM(),
396 memblock_end_of_DRAM() - memblock_start_of_DRAM());
397 }
398 start = 0;
399 size = memparse(p, &p);
400 if (*p == '@')
401 start = memparse(p + 1, &p);
402
403 add_memory_region(start, size, BOOT_MEM_RAM);
404
405 return 0;
406}
407early_param("mem", early_parse_mem);
408
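/*
 * Parse "memmap=nn@ss" (usable RAM at address ss) and "memmap=nn$ss"
 * (reserved range at ss); the x86-only "exactmap" and "nn#ss" (ACPI data)
 * forms are rejected on MIPS.
 */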
409static int __init early_parse_memmap(char *p)
410{
411 char *oldp;
412 u64 start_at, mem_size;
413
414 if (!p)
415 return -EINVAL;
416
417 if (!strncmp(p, "exactmap", 8)) {
418 pr_err("\"memmap=exactmap\" invalid on MIPS\n");
419 return 0;
420 }
421
422 oldp = p;
423 mem_size = memparse(p, &p);
424 if (p == oldp)
425 return -EINVAL;
426
427 if (*p == '@') {
428 start_at = memparse(p+1, &p);
429 add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
430 } else if (*p == '#') {
431 pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
432 return -EINVAL;
433 } else if (*p == '$') {
434 start_at = memparse(p+1, &p);
435 add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
436 } else {
437 pr_err("\"memmap\" invalid format!\n");
438 return -EINVAL;
439 }
440
441 if (*p == '\0') {
442 usermem = 1;
443 return 0;
444 } else
445 return -EINVAL;
446}
447early_param("memmap", early_parse_memmap);
448
449#ifdef CONFIG_PROC_VMCORE
450unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
451static int __init early_parse_elfcorehdr(char *p)
452{
453 struct memblock_region *mem;
454
455 setup_elfcorehdr = memparse(p, &p);
456
457 for_each_memblock(memory, mem) {
458 unsigned long start = mem->base;
459 unsigned long end = start + mem->size;
460 if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
461 /*
462 * Reserve from the elf core header to the end of
463 * the memory segment; that should all be kdump
464 * reserved memory.
465 */
466 setup_elfcorehdr_size = end - setup_elfcorehdr;
467 break;
468 }
469 }
470 /*
471 * If we don't find it in the memory map, then we shouldn't
472 * have to worry about it, as the new kernel won't use it.
473 */
474 return 0;
475}
476early_param("elfcorehdr", early_parse_elfcorehdr);
477#endif
478
479#ifdef CONFIG_KEXEC
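/*
 * Parse the crashkernel= command line argument, verify that the requested
 * range is actually available in memblock, and record it in crashk_res for
 * later reservation.
 */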
480static void __init mips_parse_crashkernel(void)
481{
482 unsigned long long total_mem;
483 unsigned long long crash_size, crash_base;
484 int ret;
485
486 total_mem = memblock_phys_mem_size();
487 ret = parse_crashkernel(boot_command_line, total_mem,
488 &crash_size, &crash_base);
489 if (ret != 0 || crash_size <= 0)
490 return;
491
492 if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
493 pr_warn("Invalid memory region reserved for crash kernel\n");
494 return;
495 }
496
497 crashk_res.start = crash_base;
498 crashk_res.end = crash_base + crash_size - 1;
499}
500
501static void __init request_crashkernel(struct resource *res)
502{
503 int ret;
504
505 if (crashk_res.start == crashk_res.end)
506 return;
507
508 ret = request_resource(res, &crashk_res);
509 if (!ret)
510 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
511 (unsigned long)(resource_size(&crashk_res) >> 20),
512 (unsigned long)(crashk_res.start >> 20));
513}
514#else /* !defined(CONFIG_KEXEC) */
515static void __init mips_parse_crashkernel(void)
516{
517}
518
519static void __init request_crashkernel(struct resource *res)
520{
521}
522#endif /* !defined(CONFIG_KEXEC) */
523
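/*
 * Make sure the memory occupied by the kernel image is listed in the
 * memblock memory map; add it if the platform memory map left it out.
 */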
524static void __init check_kernel_sections_mem(void)
525{
526 phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
527 phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;
528
529 if (!memblock_is_region_memory(start, size)) {
530 pr_info("Kernel sections are not in the memory maps\n");
531 memblock_add(start, size);
532 }
533}
534
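/*
 * Append s to boot_command_line, preceded by a separating space when the
 * command line already has content; max bounds the total length passed
 * to strlcat().
 */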
535static void __init bootcmdline_append(const char *s, size_t max)
536{
537 if (!s[0] || !max)
538 return;
539
540 if (boot_command_line[0])
541 strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
542
543 strlcat(boot_command_line, s, max);
544}
545
546#ifdef CONFIG_OF_EARLY_FLATTREE
547
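/*
 * of_scan_flat_dt() callback: look for the /chosen (or /chosen@0) node and,
 * if it carries a "bootargs" property, append it to boot_command_line and
 * note that arguments came from the device tree.
 */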
548static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
549 int depth, void *data)
550{
551 bool *dt_bootargs = data;
552 const char *p;
553 int l;
554
555 if (depth != 1 || !data ||
556 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
557 return 0;
558
559 p = of_get_flat_dt_prop(node, "bootargs", &l);
560 if (p != NULL && l > 0) {
561 bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
562 *dt_bootargs = true;
563 }
564
565 return 1;
566}
567
568#endif /* CONFIG_OF_EARLY_FLATTREE */
569
570static void __init bootcmdline_init(void)
571{
572 bool dt_bootargs = false;
573
574 /*
575 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
576 * trivial - we simply use the built-in command line unconditionally &
577 * unmodified.
578 */
579 if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
580 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
581 return;
582 }
583
584 /*
585 * If the user specified a built-in command line &
586 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
587 * prepended to arguments from the bootloader or DT so we'll copy them
588 * to the start of boot_command_line here. Otherwise, empty
589 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
590 */
591 if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
592 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
593 else
594 boot_command_line[0] = 0;
595
596#ifdef CONFIG_OF_EARLY_FLATTREE
597 /*
598 * If we're configured to take boot arguments from DT, look for those
599 * now.
600 */
601 if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
602 IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
603 of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
604#endif
605
606 /*
607 * If we didn't get any arguments from DT (regardless of whether that's
608 * because we weren't configured to look for them, or because we looked
609 * & found none) then we'll take arguments from the bootloader.
610 * plat_mem_setup() should have filled arcs_cmdline with arguments from
611 * the bootloader.
612 */
613 if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
614 bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
615
616 /*
617 * If the user specified a built-in command line & we didn't already
618 * prepend it, we append it to boot_command_line here.
619 */
620 if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
621 !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
622 bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
623}
624
625/*
626 * arch_mem_init - initialize memory management subsystem
627 *
628 * o plat_mem_setup() detects the memory configuration and will record detected
629 * memory areas using add_memory_region.
630 *
631 * At this stage the memory configuration of the system is known to the
632 * kernel but the generic memory management system is still entirely uninitialized.
633 *
634 * o bootmem_init()
635 * o sparse_init()
636 * o paging_init()
637 * o dma_contiguous_reserve()
638 *
639 * At this stage the bootmem allocator is ready to use.
640 *
641 * NOTE: historically plat_mem_setup did the entire platform initialization.
642 * This was rather impractical because it meant plat_mem_setup had to
643 * get by without any kind of memory allocator. To keep old code from
644 * breaking, plat_setup was simply renamed to plat_mem_setup and a second platform
645 * initialization hook for anything else was introduced.
646 */
647static void __init arch_mem_init(char **cmdline_p)
648{
649 /* call board setup routine */
650 plat_mem_setup();
651 memblock_set_bottom_up(true);
652
653 bootcmdline_init();
654 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
655 *cmdline_p = command_line;
656
657 parse_early_param();
658
659 if (usermem)
660 pr_info("User-defined physical RAM map overwrite\n");
661
662 check_kernel_sections_mem();
663
664 early_init_fdt_reserve_self();
665 early_init_fdt_scan_reserved_mem();
666
667#ifndef CONFIG_NUMA
668 memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
669#endif
670 bootmem_init();
671
672 /*
673 * Prevent memblock from allocating high memory.
674 * This cannot be done before max_low_pfn is detected, so up
675 * to this point it is only possible to reserve physical memory
676 * with memblock_reserve; memblock_alloc* can be used
677 * only after this point.
678 */
679 memblock_set_current_limit(PFN_PHYS(max_low_pfn));
680
681#ifdef CONFIG_PROC_VMCORE
682 if (setup_elfcorehdr && setup_elfcorehdr_size) {
683 printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
684 setup_elfcorehdr, setup_elfcorehdr_size);
685 memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
686 }
687#endif
688
689 mips_parse_crashkernel();
690#ifdef CONFIG_KEXEC
691 if (crashk_res.start != crashk_res.end)
692 memblock_reserve(crashk_res.start, resource_size(&crashk_res));
693#endif
694 device_tree_init();
695
696 /*
697 * In order to reduce the possibility of a kernel panic when failing to
698 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate as
699 * little low memory as possible before plat_swiotlb_setup(), so
700 * make sparse_init() use top-down allocation.
701 */
702 memblock_set_bottom_up(false);
703 sparse_init();
704 memblock_set_bottom_up(true);
705
706 plat_swiotlb_setup();
707
708 dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
709
710 /* Reserve for hibernation. */
711 memblock_reserve(__pa_symbol(&__nosave_begin),
712 __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
713
714 fdt_init_reserved_mem();
715
716 memblock_dump_all();
717
718 early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
719}
720
721static void __init resource_init(void)
722{
723 struct memblock_region *region;
724
725 if (UNCAC_BASE != IO_BASE)
726 return;
727
728 code_resource.start = __pa_symbol(&_text);
729 code_resource.end = __pa_symbol(&_etext) - 1;
730 data_resource.start = __pa_symbol(&_etext);
731 data_resource.end = __pa_symbol(&_edata) - 1;
732 bss_resource.start = __pa_symbol(&__bss_start);
733 bss_resource.end = __pa_symbol(&__bss_stop) - 1;
734
735 for_each_memblock(memory, region) {
736 phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
737 phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
738 struct resource *res;
739
740 res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
741 if (!res)
742 panic("%s: Failed to allocate %zu bytes\n", __func__,
743 sizeof(struct resource));
744
745 res->start = start;
746 res->end = end;
747 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
748 res->name = "System RAM";
749
750 request_resource(&iomem_resource, res);
751
752 /*
753 * We don't know which RAM region contains kernel data,
754 * so we try it repeatedly and let the resource manager
755 * test it.
756 */
757 request_resource(res, &code_resource);
758 request_resource(res, &data_resource);
759 request_resource(res, &bss_resource);
760 request_crashkernel(res);
761 }
762}
763
764#ifdef CONFIG_SMP
765static void __init prefill_possible_map(void)
766{
767 int i, possible = num_possible_cpus();
768
769 if (possible > nr_cpu_ids)
770 possible = nr_cpu_ids;
771
772 for (i = 0; i < possible; i++)
773 set_cpu_possible(i, true);
774 for (; i < NR_CPUS; i++)
775 set_cpu_possible(i, false);
776
777 nr_cpu_ids = possible;
778}
779#else
780static inline void prefill_possible_map(void) {}
781#endif
782
783void __init setup_arch(char **cmdline_p)
784{
785 cpu_probe();
786 mips_cm_probe();
787 prom_init();
788
789 setup_early_fdc_console();
790#ifdef CONFIG_EARLY_PRINTK
791 setup_early_printk();
792#endif
793 cpu_report();
794 check_bugs_early();
795
796#if defined(CONFIG_VT)
797#if defined(CONFIG_VGA_CONSOLE)
798 conswitchp = &vga_con;
799#endif
800#endif
801
802 arch_mem_init(cmdline_p);
803 dmi_setup();
804
805 resource_init();
806 plat_smp_setup();
807 prefill_possible_map();
808
809 cpu_cache_init();
810 paging_init();
811}
812
813unsigned long kernelsp[NR_CPUS];
814unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
815
816#ifdef CONFIG_USE_OF
817unsigned long fw_passed_dtb;
818#endif
819
820#ifdef CONFIG_DEBUG_FS
821struct dentry *mips_debugfs_dir;
822static int __init debugfs_mips(void)
823{
824 mips_debugfs_dir = debugfs_create_dir("mips", NULL);
825 return 0;
826}
827arch_initcall(debugfs_mips);
828#endif
829
830#ifdef CONFIG_DMA_MAYBE_COHERENT
831/* User defined DMA coherency from command line. */
832enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
833EXPORT_SYMBOL_GPL(coherentio);
834int hw_coherentio; /* Actual hardware supported DMA coherency setting. */
835
836static int __init setcoherentio(char *str)
837{
838 coherentio = IO_COHERENCE_ENABLED;
839 pr_info("Hardware DMA cache coherency (command line)\n");
840 return 0;
841}
842early_param("coherentio", setcoherentio);
843
844static int __init setnocoherentio(char *str)
845{
846 coherentio = IO_COHERENCE_DISABLED;
847 pr_info("Software DMA cache coherency (command line)\n");
848 return 0;
849}
850early_param("nocoherentio", setnocoherentio);
851#endif