/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Despite its name this variable is used even if we don't have PCI.
 */
unsigned int PCI_DMA_BUS_IS_PHYS;

EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86
 * style I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

static void *detect_magic __initdata = detect_memory_region;

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}

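/*
 * Size RAM by looking for the first power-of-two offset at which memory
 * starts to alias: detect_magic holds a distinctive pointer value in
 * .data, and on boards whose RAM simply wraps around the address space
 * the same value reads back once "size" reaches the installed amount.
 * (This presumes such aliasing behaviour; boards without it should pass
 * an explicit memory size instead.)
 */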
void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		 ((unsigned long long) size) / SZ_1M,
		 (unsigned long long) start,
		 ((unsigned long long) sz_min) / SZ_1M,
		 ((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);
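
/*
 * Example (values illustrative only): a bootloader that loads the
 * ramdisk itself might pass "rd_start=0x84000000 rd_size=8M";
 * memparse() accepts the usual K/M/G suffixes.
 */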

/* It returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if it needs to pass them through
	 * 64-bit values when the kernel has been built pure
	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is initialized
	 * we will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			continue;
		}

		/*
		 * We are rounding up the start address of usable memory
		 * and at the end of the usable range downwards.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif /* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 * o plat_mem_setup() detects the memory configuration and will record detected
 *   memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but generic memory management system is still entirely uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator. To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);
	return 0;
}
early_param("mem", early_parse_mem);
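
/*
 * Example (illustrative): "mem=256M@0x0" discards the firmware-supplied
 * map and registers a single 256MB region at physical address 0; the
 * option may be given more than once to describe several regions.
 */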

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment; that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
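
/*
 * "elfcorehdr=<addr>" is typically appended by kexec-tools when booting
 * the kdump capture kernel; it gives the physical address of the ELF
 * core header describing the crashed kernel's memory.
 */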
#endif

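/*
 * Add a chunk of kernel memory (text/data or init sections) to the boot
 * memory map, unless an existing entry already covers its start.
 */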
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

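/*
 * Example (illustrative): "crashkernel=64M@128M" requests a 64MB region
 * at physical 128MB for the kdump kernel; parse_crashkernel() extracts
 * crash_size and crash_base from that syntax.
 */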
static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
#else /* !defined(CONFIG_KEXEC) */
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC) */

#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps. The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();
	paging_init();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
}

static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-map-ops.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>
#include <linux/dmi.h>
#include <linux/crash_dump.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/mips-cps.h>
#include <asm/prom.h>
#include <asm/fw/fw.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
char __section(".appended_dtb") __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
#else
static const char builtin_cmdline[] __initconst = "";
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86
 * style I/O ports are mapped.
 */
unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

unsigned long __kaslr_offset __ro_after_init;
EXPORT_SYMBOL(__kaslr_offset);

static void *detect_magic __initdata = detect_memory_region;

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
unsigned long ARCH_PFN_OFFSET;
EXPORT_SYMBOL(ARCH_PFN_OFFSET);
#endif

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		 ((unsigned long long) size) / SZ_1M,
		 (unsigned long long) start,
		 ((unsigned long long) sz_min) / SZ_1M,
		 ((unsigned long long) sz_max) / SZ_1M);

	memblock_add(start, size);
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* It returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if it needs to pass them through
	 * 64-bit values when the kernel has been built pure
	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/*
 * In some conditions (e.g. big endian bootloader with a little endian
 * kernel), the initrd might appear byte swapped. Try to detect this and
 * byte swap it if needed.
 */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	memblock_reserve(__pa(initrd_start), size);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else /* !CONFIG_SGI_IP27 */

static void __init bootmem_init(void)
{
	phys_addr_t ramstart, ramend;
	unsigned long start, end;
	int i;

	ramstart = memblock_start_of_DRAM();
	ramend = memblock_end_of_DRAM();

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is initialized
	 * we will reserve the area used for the initrd.
	 */
	init_initrd();

	/* Reserve memory occupied by kernel. */
	memblock_reserve(__pa_symbol(&_text),
			 __pa_symbol(&_end) - __pa_symbol(&_text));

	/* max_low_pfn is not a number of pages but the end pfn of low mem */

#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
	ARCH_PFN_OFFSET = PFN_UP(ramstart);
#else
	/*
	 * Reserve any memory between the start of RAM and PHYS_OFFSET
	 */
	if (ramstart > PHYS_OFFSET)
		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);

	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
	}
#endif

	min_low_pfn = ARCH_PFN_OFFSET;
	max_pfn = PFN_DOWN(ramend);
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
		if (end > max_low_pfn)
			max_low_pfn = end;
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");

	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
#ifdef CONFIG_HIGHMEM
		highstart_pfn = max_low_pfn;
		highend_pfn = max_pfn;
#else
		max_pfn = max_low_pfn;
#endif
	}

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif /* CONFIG_SGI_IP27 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	if (!p) {
		pr_err("mem parameter is empty, do nothing\n");
		return -EINVAL;
	}

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
				memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	if (IS_ENABLED(CONFIG_NUMA))
		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
	else
		memblock_add(start, size);

	return 0;
}
early_param("mem", early_parse_mem);

static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		memblock_add(start_at, mem_size);
		memblock_reserve(start_at, mem_size);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);
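
/*
 * Examples (illustrative): "memmap=4M@0x10000000" adds a 4MB region at
 * 256MB, while "memmap=4M$0x10000000" adds it and immediately reserves
 * it (the '$' typically needs escaping from the bootloader/shell).
 */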
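/*
 * Reserve the memory that holds the ELF core header passed in from the
 * crashed kernel (and, when its size is unknown, the remainder of its
 * memory segment) so /proc/vmcore can expose the old kernel's image.
 */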
static void __init mips_reserve_vmcore(void)
{
#ifdef CONFIG_PROC_VMCORE
	phys_addr_t start, end;
	u64 i;

	if (!elfcorehdr_size) {
		for_each_mem_range(i, &start, &end) {
			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
				/*
				 * Reserve from the elf core header to the
				 * end of the memory segment; that should
				 * all be kdump reserved memory.
				 */
				elfcorehdr_size = end - elfcorehdr_addr;
				break;
			}
		}
	}

	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
#endif
}

/* 64M alignment for crash kernel regions */
#define CRASH_ALIGN	SZ_64M
#define CRASH_ADDR_MAX	SZ_512M

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	total_mem = memblock_phys_mem_size();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base,
				NULL, NULL);
	if (ret != 0 || crash_size <= 0)
		return;

	if (crash_base <= 0) {
		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN,
						       CRASH_ADDR_MAX);
		if (!crash_base) {
			pr_warn("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, 1,
						  crash_base,
						  crash_base + crash_size);
		if (start != crash_base) {
			pr_warn("Invalid memory region reserved for crash kernel\n");
			return;
		}
	}

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)(resource_size(&crashk_res) >> 20),
			(unsigned long)(crashk_res.start >> 20));
}
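/*
 * Make sure the kernel image itself is covered by the memory map; some
 * platforms/bootloaders describe RAM without including the region the
 * kernel was loaded into.
 */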
static void __init check_kernel_sections_mem(void)
{
	phys_addr_t start = __pa_symbol(&_text);
	phys_addr_t size = __pa_symbol(&_end) - start;

	if (!memblock_is_region_memory(start, size)) {
		pr_info("Kernel sections are not in the memory maps\n");
		memblock_add(start, size);
	}
}

static void __init bootcmdline_append(const char *s, size_t max)
{
	if (!s[0] || !max)
		return;

	if (boot_command_line[0])
		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);

	strlcat(boot_command_line, s, max);
}

#ifdef CONFIG_OF_EARLY_FLATTREE

static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
					  int depth, void *data)
{
	bool *dt_bootargs = data;
	const char *p;
	int l;

	if (depth != 1 || !data ||
	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
		return 0;

	p = of_get_flat_dt_prop(node, "bootargs", &l);
	if (p != NULL && l > 0) {
		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
		*dt_bootargs = true;
	}

	return 1;
}

#endif /* CONFIG_OF_EARLY_FLATTREE */
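/*
 * Assemble boot_command_line from up to three sources - the built-in
 * command line, DT bootargs and the bootloader (arcs_cmdline) - with
 * the precedence rules spelled out in the comments below.
 */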
static void __init bootcmdline_init(void)
{
	bool dt_bootargs = false;

	/*
	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
	 * trivial - we simply use the built-in command line unconditionally &
	 * unmodified.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
		return;
	}

	/*
	 * If the user specified a built-in command line &
	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
	 * prepended to arguments from the bootloader or DT so we'll copy them
	 * to the start of boot_command_line here. Otherwise, empty
	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	else
		boot_command_line[0] = 0;

#ifdef CONFIG_OF_EARLY_FLATTREE
	/*
	 * If we're configured to take boot arguments from DT, look for those
	 * now.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
#endif

	/*
	 * If we didn't get any arguments from DT (regardless of whether that's
	 * because we weren't configured to look for them, or because we looked
	 * & found none) then we'll take arguments from the bootloader.
	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
	 * the bootloader.
	 */
	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);

	/*
	 * If the user specified a built-in command line & we didn't already
	 * prepend it, we append it to boot_command_line here.
	 */
	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
}

/*
 * arch_mem_init - initialize memory management subsystem
 *
 * o plat_mem_setup() detects the memory configuration and will record detected
 *   memory areas using memblock_add.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel but generic memory management system is still entirely uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 *	 get away without any kind of memory allocator. To keep old code from
 *	 breaking, plat_setup was just renamed to plat_mem_setup and a second
 *	 platform initialization hook for anything else was introduced.
 */
static void __init arch_mem_init(char **cmdline_p)
{
	/* call board setup routine */
	plat_mem_setup();
	memblock_set_bottom_up(true);

	bootcmdline_init();
	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	if (usermem)
		pr_info("User-defined physical RAM map overwrite\n");

	check_kernel_sections_mem();

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

#ifndef CONFIG_NUMA
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
#endif
	bootmem_init();

	/*
	 * Prevent memblock from allocating high memory.
	 * This cannot be done before max_low_pfn is detected, so up
	 * to this point it is only possible to reserve physical memory
	 * with memblock_reserve; memblock_alloc* can be used
	 * only after this point.
	 */
	memblock_set_current_limit(PFN_PHYS(max_low_pfn));

	mips_reserve_vmcore();

	mips_parse_crashkernel();
	device_tree_init();

	/*
	 * In order to reduce the possibility of kernel panic when failing to
	 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
	 * low memory as small as possible before plat_swiotlb_setup(), so
	 * make sparse_init() use top-down allocation.
	 */
	memblock_set_bottom_up(false);
	sparse_init();
	memblock_set_bottom_up(true);

	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));

	/* Reserve for hibernation. */
	memblock_reserve(__pa_symbol(&__nosave_begin),
			 __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));

	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
}

static void __init resource_init(void)
{
	phys_addr_t start, end;
	u64 i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));

		res->start = start;
		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res->end = end - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		res->name = "System RAM";

		request_resource(&iomem_resource, res);

		/*
		 * We don't know which RAM region contains kernel data,
		 * so we try it repeatedly and let the resource manager
		 * test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	set_nr_cpu_ids(possible);
}
#else
static inline void prefill_possible_map(void) {}
#endif
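/*
 * If the firmware environment carries an "rngseed" variable, it is
 * expected to hold a hex string (e.g. "rngseed=a1b2c3..."); decode it,
 * credit it to the RNG and then wipe both copies from memory.
 */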
static void __init setup_rng_seed(void)
{
	char *rng_seed_hex = fw_getenv("rngseed");
	u8 rng_seed[512];
	size_t len;

	if (!rng_seed_hex)
		return;

	len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
	if (hex2bin(rng_seed, rng_seed_hex, len))
		return;

	add_bootloader_randomness(rng_seed, len);
	memzero_explicit(rng_seed, len);
	memzero_explicit(rng_seed_hex, len * 2);
}

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
		check_bugs64_early();

	arch_mem_init(cmdline_p);
	dmi_setup();

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();

	memblock_dump_all();

	setup_rng_seed();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
	return 0;
}
arch_initcall(debugfs_mips);
#endif

#ifdef CONFIG_DMA_NONCOHERENT
static int __init setcoherentio(char *str)
{
	dma_default_coherent = true;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	dma_default_coherent = false;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
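
/*
 * "coherentio"/"nocoherentio" on the kernel command line override the
 * platform's default assumption about hardware DMA cache coherency.
 */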
#endif

void __init arch_cpu_finalize_init(void)
{
	unsigned int cpu = smp_processor_id();

	cpu_data[cpu].udelay_val = loops_per_jiffy;
	check_bugs32();

	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
		check_bugs64();
}