v5.14.15
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1995 Linus Torvalds
  7 * Copyright (C) 1995 Waldorf Electronics
  8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
  9 * Copyright (C) 1996 Stoned Elipot
 10 * Copyright (C) 1999 Silicon Graphics, Inc.
 11 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 12 */
 13#include <linux/init.h>
 14#include <linux/ioport.h>
 15#include <linux/export.h>
 16#include <linux/screen_info.h>
 17#include <linux/memblock.h>
 18#include <linux/initrd.h>
 19#include <linux/root_dev.h>
 20#include <linux/highmem.h>
 21#include <linux/console.h>
 22#include <linux/pfn.h>
 23#include <linux/debugfs.h>
 24#include <linux/kexec.h>
 25#include <linux/sizes.h>
 26#include <linux/device.h>
 27#include <linux/dma-map-ops.h>
 28#include <linux/decompress/generic.h>
 29#include <linux/of_fdt.h>
 30#include <linux/dmi.h>
 31#include <linux/crash_dump.h>
 32
 33#include <asm/addrspace.h>
 34#include <asm/bootinfo.h>
 35#include <asm/bugs.h>
 36#include <asm/cache.h>
 37#include <asm/cdmm.h>
 38#include <asm/cpu.h>
 39#include <asm/debug.h>
 40#include <asm/sections.h>
 41#include <asm/setup.h>
 42#include <asm/smp-ops.h>
 43#include <asm/prom.h>
 44
 45#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
 46char __section(".appended_dtb") __appended_dtb[0x100000];
 47#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 48
 49struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
 50
 51EXPORT_SYMBOL(cpu_data);
 52
 53#ifdef CONFIG_VT
 54struct screen_info screen_info;
 55#endif
 56
 57/*
 58 * Setup information
 59 *
 60 * These are initialized so they are in the .data section
 61 */
 62unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
 63
 64EXPORT_SYMBOL(mips_machtype);
 65
 66static char __initdata command_line[COMMAND_LINE_SIZE];
 67char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
 68
 69#ifdef CONFIG_CMDLINE_BOOL
 70static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
 71#else
 72static const char builtin_cmdline[] __initconst = "";
 73#endif
 74
 75/*
 76 * mips_io_port_base is the beginning of the address space to which x86 style
 77 * I/O ports are mapped.
 78 */
 79unsigned long mips_io_port_base = -1;
 80EXPORT_SYMBOL(mips_io_port_base);
 81
 82static struct resource code_resource = { .name = "Kernel code", };
 83static struct resource data_resource = { .name = "Kernel data", };
 84static struct resource bss_resource = { .name = "Kernel bss", };
 85
 86unsigned long __kaslr_offset __ro_after_init;
 87EXPORT_SYMBOL(__kaslr_offset);
 88
 89static void *detect_magic __initdata = detect_memory_region;
 90
 91#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
 92unsigned long ARCH_PFN_OFFSET;
 93EXPORT_SYMBOL(ARCH_PFN_OFFSET);
 94#endif
 95
 96void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
 97{
 98	void *dm = &detect_magic;
 99	phys_addr_t size;
100
101	for (size = sz_min; size < sz_max; size <<= 1) {
102		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
103			break;
104	}
105
106	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
107		((unsigned long long) size) / SZ_1M,
108		(unsigned long long) start,
109		((unsigned long long) sz_min) / SZ_1M,
110		((unsigned long long) sz_max) / SZ_1M);
111
112	memblock_add(start, size);
113}
114
115/*
116 * Manage initrd
117 */
118#ifdef CONFIG_BLK_DEV_INITRD
119
120static int __init rd_start_early(char *p)
121{
122	unsigned long start = memparse(p, &p);
123
124#ifdef CONFIG_64BIT
125	/* Guess if the sign extension was forgotten by bootloader */
126	if (start < XKPHYS)
127		start = (int)start;
128#endif
129	initrd_start = start;
130	initrd_end += start;
131	return 0;
132}
133early_param("rd_start", rd_start_early);
134
135static int __init rd_size_early(char *p)
136{
137	initrd_end += memparse(p, &p);
138	return 0;
139}
140early_param("rd_size", rd_size_early);
141
142/* it returns the next free pfn after initrd */
143static unsigned long __init init_initrd(void)
144{
145	unsigned long end;
146
147	/*
148	 * Board specific code or command line parser should have
149	 * already set up initrd_start and initrd_end. In these cases
 149	 * perform sanity checks and use them if all looks good.
151	 */
152	if (!initrd_start || initrd_end <= initrd_start)
153		goto disable;
154
155	if (initrd_start & ~PAGE_MASK) {
156		pr_err("initrd start must be page aligned\n");
157		goto disable;
158	}
159	if (initrd_start < PAGE_OFFSET) {
160		pr_err("initrd start < PAGE_OFFSET\n");
161		goto disable;
162	}
163
164	/*
165	 * Sanitize initrd addresses. For example firmware
166	 * can't guess if they need to pass them through
167	 * 64-bits values if the kernel has been built in pure
168	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
169	 * addresses now, so the code can now safely use __pa().
170	 */
171	end = __pa(initrd_end);
172	initrd_end = (unsigned long)__va(end);
173	initrd_start = (unsigned long)__va(__pa(initrd_start));
174
175	ROOT_DEV = Root_RAM0;
176	return PFN_UP(end);
177disable:
178	initrd_start = 0;
179	initrd_end = 0;
180	return 0;
181}
182
183/* In some conditions (e.g. big endian bootloader with a little endian
184   kernel), the initrd might appear byte swapped.  Try to detect this and
185   byte swap it if needed.  */
186static void __init maybe_bswap_initrd(void)
187{
188#if defined(CONFIG_CPU_CAVIUM_OCTEON)
189	u64 buf;
190
191	/* Check for CPIO signature */
192	if (!memcmp((void *)initrd_start, "070701", 6))
193		return;
194
195	/* Check for compressed initrd */
196	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
197		return;
198
199	/* Try again with a byte swapped header */
200	buf = swab64p((u64 *)initrd_start);
201	if (!memcmp(&buf, "070701", 6) ||
202	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
203		unsigned long i;
204
205		pr_info("Byteswapped initrd detected\n");
206		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
207			swab64s((u64 *)i);
208	}
209#endif
210}
211
212static void __init finalize_initrd(void)
213{
214	unsigned long size = initrd_end - initrd_start;
215
216	if (size == 0) {
217		printk(KERN_INFO "Initrd not found or empty");
218		goto disable;
219	}
220	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
221		printk(KERN_ERR "Initrd extends beyond end of memory");
222		goto disable;
223	}
224
225	maybe_bswap_initrd();
226
227	memblock_reserve(__pa(initrd_start), size);
228	initrd_below_start_ok = 1;
229
230	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
231		initrd_start, size);
232	return;
233disable:
234	printk(KERN_CONT " - disabling initrd\n");
235	initrd_start = 0;
236	initrd_end = 0;
237}
238
239#else  /* !CONFIG_BLK_DEV_INITRD */
240
241static unsigned long __init init_initrd(void)
242{
243	return 0;
244}
245
246#define finalize_initrd()	do {} while (0)
247
248#endif
249
250/*
 251 * Initialize the bootmem allocator. It also sets up initrd related data
252 * if needed.
253 */
254#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
255
256static void __init bootmem_init(void)
257{
258	init_initrd();
259	finalize_initrd();
260}
261
262#else  /* !CONFIG_SGI_IP27 */
263
264static void __init bootmem_init(void)
265{
266	phys_addr_t ramstart, ramend;
267	unsigned long start, end;
268	int i;
269
270	ramstart = memblock_start_of_DRAM();
271	ramend = memblock_end_of_DRAM();
272
273	/*
274	 * Sanity check any INITRD first. We don't take it into account
275	 * for bootmem setup initially, rely on the end-of-kernel-code
276	 * as our memory range starting point. Once bootmem is inited we
277	 * will reserve the area used for the initrd.
278	 */
279	init_initrd();
280
281	/* Reserve memory occupied by kernel. */
282	memblock_reserve(__pa_symbol(&_text),
283			__pa_symbol(&_end) - __pa_symbol(&_text));
284
285	/* max_low_pfn is not a number of pages but the end pfn of low mem */
286
287#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
288	ARCH_PFN_OFFSET = PFN_UP(ramstart);
289#else
290	/*
291	 * Reserve any memory between the start of RAM and PHYS_OFFSET
292	 */
293	if (ramstart > PHYS_OFFSET)
294		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
295
296	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
297		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
298			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
299			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
300	}
301#endif
302
303	min_low_pfn = ARCH_PFN_OFFSET;
304	max_pfn = PFN_DOWN(ramend);
305	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
306		/*
307		 * Skip highmem here so we get an accurate max_low_pfn if low
308		 * memory stops short of high memory.
309		 * If the region overlaps HIGHMEM_START, end is clipped so
310		 * max_pfn excludes the highmem portion.
311		 */
312		if (start >= PFN_DOWN(HIGHMEM_START))
313			continue;
314		if (end > PFN_DOWN(HIGHMEM_START))
315			end = PFN_DOWN(HIGHMEM_START);
316		if (end > max_low_pfn)
317			max_low_pfn = end;
318	}
319
320	if (min_low_pfn >= max_low_pfn)
321		panic("Incorrect memory mapping !!!");
322
323	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
324#ifdef CONFIG_HIGHMEM
325		highstart_pfn = PFN_DOWN(HIGHMEM_START);
326		highend_pfn = max_pfn;
327#else
328		max_low_pfn = PFN_DOWN(HIGHMEM_START);
329		max_pfn = max_low_pfn;
330#endif
331	}
332
333	/*
334	 * Reserve initrd memory if needed.
335	 */
336	finalize_initrd();
337}
338
339#endif	/* CONFIG_SGI_IP27 */
340
341static int usermem __initdata;
342
343static int __init early_parse_mem(char *p)
344{
345	phys_addr_t start, size;
346
347	/*
348	 * If a user specifies memory size, we
349	 * blow away any automatically generated
350	 * size.
351	 */
352	if (usermem == 0) {
353		usermem = 1;
354		memblock_remove(memblock_start_of_DRAM(),
355			memblock_end_of_DRAM() - memblock_start_of_DRAM());
356	}
357	start = 0;
358	size = memparse(p, &p);
359	if (*p == '@')
360		start = memparse(p + 1, &p);
361
362	memblock_add(start, size);
363
364	return 0;
365}
366early_param("mem", early_parse_mem);
367
368static int __init early_parse_memmap(char *p)
369{
370	char *oldp;
371	u64 start_at, mem_size;
372
373	if (!p)
374		return -EINVAL;
375
376	if (!strncmp(p, "exactmap", 8)) {
377		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
378		return 0;
379	}
380
381	oldp = p;
382	mem_size = memparse(p, &p);
383	if (p == oldp)
384		return -EINVAL;
385
386	if (*p == '@') {
387		start_at = memparse(p+1, &p);
388		memblock_add(start_at, mem_size);
389	} else if (*p == '#') {
390		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
391		return -EINVAL;
392	} else if (*p == '$') {
393		start_at = memparse(p+1, &p);
394		memblock_add(start_at, mem_size);
395		memblock_reserve(start_at, mem_size);
396	} else {
397		pr_err("\"memmap\" invalid format!\n");
398		return -EINVAL;
399	}
400
401	if (*p == '\0') {
402		usermem = 1;
403		return 0;
404	} else
405		return -EINVAL;
406}
407early_param("memmap", early_parse_memmap);
408
409static void __init mips_reserve_vmcore(void)
410{
411#ifdef CONFIG_PROC_VMCORE
412	phys_addr_t start, end;
413	u64 i;
414
415	if (!elfcorehdr_size) {
416		for_each_mem_range(i, &start, &end) {
417			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
418				/*
419				 * Reserve from the elf core header to the end of
420				 * the memory segment, that should all be kdump
421				 * reserved memory.
422				 */
423				elfcorehdr_size = end - elfcorehdr_addr;
424				break;
425			}
426		}
427	}
428
429	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
430		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);
431
432	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
433#endif
434}
435
436#ifdef CONFIG_KEXEC
437
438/* 64M alignment for crash kernel regions */
439#define CRASH_ALIGN	SZ_64M
440#define CRASH_ADDR_MAX	SZ_512M
441
442static void __init mips_parse_crashkernel(void)
443{
444	unsigned long long total_mem;
445	unsigned long long crash_size, crash_base;
446	int ret;
447
448	total_mem = memblock_phys_mem_size();
449	ret = parse_crashkernel(boot_command_line, total_mem,
450				&crash_size, &crash_base);
451	if (ret != 0 || crash_size <= 0)
452		return;
453
454	if (crash_base <= 0) {
455		crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX,
456							crash_size, CRASH_ALIGN);
457		if (!crash_base) {
458			pr_warn("crashkernel reservation failed - No suitable area found.\n");
459			return;
460		}
461	} else {
462		unsigned long long start;
463
464		start = memblock_find_in_range(crash_base, crash_base + crash_size,
465						crash_size, 1);
466		if (start != crash_base) {
467			pr_warn("Invalid memory region reserved for crash kernel\n");
468			return;
469		}
470	}
471
472	crashk_res.start = crash_base;
473	crashk_res.end	 = crash_base + crash_size - 1;
474}
475
476static void __init request_crashkernel(struct resource *res)
477{
478	int ret;
479
480	if (crashk_res.start == crashk_res.end)
481		return;
482
483	ret = request_resource(res, &crashk_res);
484	if (!ret)
485		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
486			(unsigned long)(resource_size(&crashk_res) >> 20),
487			(unsigned long)(crashk_res.start  >> 20));
488}
489#else /* !defined(CONFIG_KEXEC)		*/
490static void __init mips_parse_crashkernel(void)
491{
492}
493
494static void __init request_crashkernel(struct resource *res)
495{
496}
497#endif /* !defined(CONFIG_KEXEC)  */
498
499static void __init check_kernel_sections_mem(void)
500{
501	phys_addr_t start = __pa_symbol(&_text);
502	phys_addr_t size = __pa_symbol(&_end) - start;
503
504	if (!memblock_is_region_memory(start, size)) {
505		pr_info("Kernel sections are not in the memory maps\n");
506		memblock_add(start, size);
507	}
508}
509
510static void __init bootcmdline_append(const char *s, size_t max)
511{
512	if (!s[0] || !max)
513		return;
514
515	if (boot_command_line[0])
516		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
517
518	strlcat(boot_command_line, s, max);
519}
520
521#ifdef CONFIG_OF_EARLY_FLATTREE
522
523static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
524					  int depth, void *data)
525{
526	bool *dt_bootargs = data;
527	const char *p;
528	int l;
529
530	if (depth != 1 || !data ||
531	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
532		return 0;
533
534	p = of_get_flat_dt_prop(node, "bootargs", &l);
535	if (p != NULL && l > 0) {
536		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
537		*dt_bootargs = true;
538	}
539
540	return 1;
541}
542
543#endif /* CONFIG_OF_EARLY_FLATTREE */
544
545static void __init bootcmdline_init(void)
546{
547	bool dt_bootargs = false;
548
549	/*
550	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
551	 * trivial - we simply use the built-in command line unconditionally &
552	 * unmodified.
553	 */
554	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
555		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
556		return;
557	}
558
559	/*
560	 * If the user specified a built-in command line &
561	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
562	 * prepended to arguments from the bootloader or DT so we'll copy them
563	 * to the start of boot_command_line here. Otherwise, empty
564	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
565	 */
566	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
567		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
568	else
569		boot_command_line[0] = 0;
570
571#ifdef CONFIG_OF_EARLY_FLATTREE
572	/*
573	 * If we're configured to take boot arguments from DT, look for those
574	 * now.
575	 */
576	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
577	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
578		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
579#endif
580
581	/*
582	 * If we didn't get any arguments from DT (regardless of whether that's
583	 * because we weren't configured to look for them, or because we looked
584	 * & found none) then we'll take arguments from the bootloader.
585	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
586	 * the bootloader.
587	 */
588	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
589		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
590
591	/*
592	 * If the user specified a built-in command line & we didn't already
593	 * prepend it, we append it to boot_command_line here.
594	 */
595	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
596	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
597		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
598}
599
600/*
601 * arch_mem_init - initialize memory management subsystem
602 *
603 *  o plat_mem_setup() detects the memory configuration and will record detected
604 *    memory areas using memblock_add.
605 *
606 * At this stage the memory configuration of the system is known to the
607 * kernel but generic memory management system is still entirely uninitialized.
608 *
609 *  o bootmem_init()
610 *  o sparse_init()
611 *  o paging_init()
612 *  o dma_contiguous_reserve()
613 *
614 * At this stage the bootmem allocator is ready to use.
615 *
616 * NOTE: historically plat_mem_setup did the entire platform initialization.
617 *	 This was rather impractical because it meant plat_mem_setup had to
618 * get away without any kind of memory allocator.  To keep old code from
619 * breaking plat_setup was just renamed to plat_mem_setup and a second platform
620 * initialization hook for anything else was introduced.
621 */
622static void __init arch_mem_init(char **cmdline_p)
623{
624	/* call board setup routine */
625	plat_mem_setup();
626	memblock_set_bottom_up(true);
627
628	bootcmdline_init();
629	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
630	*cmdline_p = command_line;
631
632	parse_early_param();
633
634	if (usermem)
635		pr_info("User-defined physical RAM map overwrite\n");
636
637	check_kernel_sections_mem();
638
639	early_init_fdt_reserve_self();
640	early_init_fdt_scan_reserved_mem();
641
642#ifndef CONFIG_NUMA
643	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
644#endif
645	bootmem_init();
646
647	/*
648	 * Prevent memblock from allocating high memory.
649	 * This cannot be done before max_low_pfn is detected, so up
 650	 * to this point it is only possible to reserve physical memory
651	 * with memblock_reserve; memblock_alloc* can be used
652	 * only after this point
653	 */
654	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
655
656	mips_reserve_vmcore();
657
658	mips_parse_crashkernel();
659#ifdef CONFIG_KEXEC
660	if (crashk_res.start != crashk_res.end)
661		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
662#endif
663	device_tree_init();
664
665	/*
 666	 * In order to reduce the possibility of a kernel panic when failing to
 667	 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
 668	 * as little low memory as possible before plat_swiotlb_setup(), so
 669	 * make sparse_init() use top-down allocation.
670	 */
671	memblock_set_bottom_up(false);
672	sparse_init();
673	memblock_set_bottom_up(true);
674
675	plat_swiotlb_setup();
676
677	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
678
679	/* Reserve for hibernation. */
680	memblock_reserve(__pa_symbol(&__nosave_begin),
681		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
682
683	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
684}
685
686static void __init resource_init(void)
687{
688	phys_addr_t start, end;
689	u64 i;
690
691	if (UNCAC_BASE != IO_BASE)
692		return;
693
694	code_resource.start = __pa_symbol(&_text);
695	code_resource.end = __pa_symbol(&_etext) - 1;
696	data_resource.start = __pa_symbol(&_etext);
697	data_resource.end = __pa_symbol(&_edata) - 1;
698	bss_resource.start = __pa_symbol(&__bss_start);
699	bss_resource.end = __pa_symbol(&__bss_stop) - 1;
700
701	for_each_mem_range(i, &start, &end) {
702		struct resource *res;
703
704		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
705		if (!res)
706			panic("%s: Failed to allocate %zu bytes\n", __func__,
707			      sizeof(struct resource));
708
709		res->start = start;
710		/*
711		 * In memblock, end points to the first byte after the
 712		 * range while in resources, end points to the last byte in
713		 * the range.
714		 */
715		res->end = end - 1;
716		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
717		res->name = "System RAM";
718
719		request_resource(&iomem_resource, res);
720
721		/*
722		 *  We don't know which RAM region contains kernel data,
723		 *  so we try it repeatedly and let the resource manager
724		 *  test it.
725		 */
726		request_resource(res, &code_resource);
727		request_resource(res, &data_resource);
728		request_resource(res, &bss_resource);
729		request_crashkernel(res);
730	}
731}
732
733#ifdef CONFIG_SMP
734static void __init prefill_possible_map(void)
735{
736	int i, possible = num_possible_cpus();
737
738	if (possible > nr_cpu_ids)
739		possible = nr_cpu_ids;
740
741	for (i = 0; i < possible; i++)
742		set_cpu_possible(i, true);
743	for (; i < NR_CPUS; i++)
744		set_cpu_possible(i, false);
745
746	nr_cpu_ids = possible;
747}
748#else
749static inline void prefill_possible_map(void) {}
750#endif
751
752void __init setup_arch(char **cmdline_p)
753{
754	cpu_probe();
755	mips_cm_probe();
756	prom_init();
757
758	setup_early_fdc_console();
759#ifdef CONFIG_EARLY_PRINTK
760	setup_early_printk();
761#endif
762	cpu_report();
763	check_bugs_early();
764
765#if defined(CONFIG_VT)
766#if defined(CONFIG_VGA_CONSOLE)
767	conswitchp = &vga_con;
768#endif
769#endif
770
771	arch_mem_init(cmdline_p);
772	dmi_setup();
773
774	resource_init();
775	plat_smp_setup();
776	prefill_possible_map();
777
778	cpu_cache_init();
779	paging_init();
780
781	memblock_dump_all();
782}
783
784unsigned long kernelsp[NR_CPUS];
785unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
786
787#ifdef CONFIG_DEBUG_FS
788struct dentry *mips_debugfs_dir;
789static int __init debugfs_mips(void)
790{
791	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
792	return 0;
793}
794arch_initcall(debugfs_mips);
795#endif
796
797#ifdef CONFIG_DMA_NONCOHERENT
798static int __init setcoherentio(char *str)
799{
800	dma_default_coherent = true;
801	pr_info("Hardware DMA cache coherency (command line)\n");
802	return 0;
803}
804early_param("coherentio", setcoherentio);
805
806static int __init setnocoherentio(char *str)
807{
 808	dma_default_coherent = false;
809	pr_info("Software DMA cache coherency (command line)\n");
810	return 0;
811}
812early_param("nocoherentio", setnocoherentio);
813#endif
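
The "mem=" and "memmap=" early parameters in the listing above both lean on memparse() to read sizes such as 64M or 0x4000000 with an optional @start offset. As a rough illustration of that parsing pattern only, here is a hypothetical, self-contained userspace sketch; simple_memparse() is a simplified stand-in written for this note, not the kernel's memparse(), and the printed memblock_add() call is simulated.

/* Hypothetical userspace sketch of the "mem=<size>[@<start>]" pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* Simplified stand-in for the kernel's memparse(): number + optional K/M/G. */
static uint64_t simple_memparse(const char *s, char **retptr)
{
	char *end;
	uint64_t v = strtoull(s, &end, 0);	/* base 0 also accepts 0x... */

	switch (*end) {
	case 'G': case 'g':
		v <<= 10;
		/* fall through */
	case 'M': case 'm':
		v <<= 10;
		/* fall through */
	case 'K': case 'k':
		v <<= 10;
		end++;
		break;
	default:
		break;
	}
	if (retptr)
		*retptr = end;
	return v;
}

int main(void)
{
	char arg[] = "64M@0x80000000";	/* e.g. mem=64M@0x80000000 */
	char *p = arg;
	uint64_t start = 0, size;

	/* Same shape as early_parse_mem(): size first, optional "@start". */
	size = simple_memparse(p, &p);
	if (*p == '@')
		start = simple_memparse(p + 1, &p);

	printf("would call memblock_add(0x%llx, 0x%llx)\n",
	       (unsigned long long)start, (unsigned long long)size);
	return 0;
}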
v6.13.7
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1995 Linus Torvalds
  7 * Copyright (C) 1995 Waldorf Electronics
  8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
  9 * Copyright (C) 1996 Stoned Elipot
 10 * Copyright (C) 1999 Silicon Graphics, Inc.
 11 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 12 */
 13#include <linux/init.h>
 14#include <linux/cpu.h>
 15#include <linux/delay.h>
 16#include <linux/ioport.h>
 17#include <linux/export.h>
 18#include <linux/memblock.h>
 19#include <linux/initrd.h>
 20#include <linux/root_dev.h>
 21#include <linux/highmem.h>
 22#include <linux/console.h>
 23#include <linux/pfn.h>
 24#include <linux/debugfs.h>
 25#include <linux/kexec.h>
 26#include <linux/sizes.h>
 27#include <linux/device.h>
 28#include <linux/dma-map-ops.h>
 29#include <linux/decompress/generic.h>
 30#include <linux/of_fdt.h>
 31#include <linux/dmi.h>
 32#include <linux/crash_dump.h>
 33
 34#include <asm/addrspace.h>
 35#include <asm/bootinfo.h>
 36#include <asm/bugs.h>
 37#include <asm/cache.h>
 38#include <asm/cdmm.h>
 39#include <asm/cpu.h>
 40#include <asm/debug.h>
 41#include <asm/mmzone.h>
 42#include <asm/sections.h>
 43#include <asm/setup.h>
 44#include <asm/smp-ops.h>
 45#include <asm/mips-cps.h>
 46#include <asm/prom.h>
 47#include <asm/fw/fw.h>
 48
 49#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
 50char __section(".appended_dtb") __appended_dtb[0x100000];
 51#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 52
 53struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
 54
 55EXPORT_SYMBOL(cpu_data);
 56
 57/*
 58 * Setup information
 59 *
 60 * These are initialized so they are in the .data section
 61 */
 62unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
 63
 64EXPORT_SYMBOL(mips_machtype);
 65
 66static char __initdata command_line[COMMAND_LINE_SIZE];
 67char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
 68
 69#ifdef CONFIG_CMDLINE_BOOL
 70static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
 71#else
 72static const char builtin_cmdline[] __initconst = "";
 73#endif
 74
 75/*
 76 * mips_io_port_base is the beginning of the address space to which x86 style
 77 * I/O ports are mapped.
 78 */
 79unsigned long mips_io_port_base = -1;
 80EXPORT_SYMBOL(mips_io_port_base);
 81
 82static struct resource code_resource = { .name = "Kernel code", };
 83static struct resource data_resource = { .name = "Kernel data", };
 84static struct resource bss_resource = { .name = "Kernel bss", };
 85
 86unsigned long __kaslr_offset __ro_after_init;
 87EXPORT_SYMBOL(__kaslr_offset);
 88
 89static void *detect_magic __initdata = detect_memory_region;
 90
 91#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
 92unsigned long ARCH_PFN_OFFSET;
 93EXPORT_SYMBOL(ARCH_PFN_OFFSET);
 94#endif
 95
 96void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
 97{
 98	void *dm = &detect_magic;
 99	phys_addr_t size;
100
101	for (size = sz_min; size < sz_max; size <<= 1) {
102		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
103			break;
104	}
105
106	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
107		((unsigned long long) size) / SZ_1M,
108		(unsigned long long) start,
109		((unsigned long long) sz_min) / SZ_1M,
110		((unsigned long long) sz_max) / SZ_1M);
111
112	memblock_add(start, size);
113}
114
115/*
116 * Manage initrd
117 */
118#ifdef CONFIG_BLK_DEV_INITRD
119
120static int __init rd_start_early(char *p)
121{
122	unsigned long start = memparse(p, &p);
123
124#ifdef CONFIG_64BIT
125	/* Guess if the sign extension was forgotten by bootloader */
126	if (start < XKPHYS)
127		start = (int)start;
128#endif
129	initrd_start = start;
130	initrd_end += start;
131	return 0;
132}
133early_param("rd_start", rd_start_early);
134
135static int __init rd_size_early(char *p)
136{
137	initrd_end += memparse(p, &p);
138	return 0;
139}
140early_param("rd_size", rd_size_early);
141
142/* it returns the next free pfn after initrd */
143static unsigned long __init init_initrd(void)
144{
145	unsigned long end;
146
147	/*
148	 * Board specific code or command line parser should have
149	 * already set up initrd_start and initrd_end. In these cases
150	 * perform sanity checks and use them if all looks good.
151	 */
152	if (!initrd_start || initrd_end <= initrd_start)
153		goto disable;
154
155	if (initrd_start & ~PAGE_MASK) {
156		pr_err("initrd start must be page aligned\n");
157		goto disable;
158	}
159
160	/*
161	 * Sanitize initrd addresses. For example firmware
162	 * can't guess if they need to pass them through
163	 * 64-bits values if the kernel has been built in pure
164	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
165	 * addresses now, so the code can now safely use __pa().
166	 */
167	end = __pa(initrd_end);
168	initrd_end = (unsigned long)__va(end);
169	initrd_start = (unsigned long)__va(__pa(initrd_start));
170
171	if (initrd_start < PAGE_OFFSET) {
172		pr_err("initrd start < PAGE_OFFSET\n");
173		goto disable;
174	}
175
176	ROOT_DEV = Root_RAM0;
177	return PFN_UP(end);
178disable:
179	initrd_start = 0;
180	initrd_end = 0;
181	return 0;
182}
183
184/* In some conditions (e.g. big endian bootloader with a little endian
185   kernel), the initrd might appear byte swapped.  Try to detect this and
186   byte swap it if needed.  */
187static void __init maybe_bswap_initrd(void)
188{
189#if defined(CONFIG_CPU_CAVIUM_OCTEON)
190	u64 buf;
191
192	/* Check for CPIO signature */
193	if (!memcmp((void *)initrd_start, "070701", 6))
194		return;
195
196	/* Check for compressed initrd */
197	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
198		return;
199
200	/* Try again with a byte swapped header */
201	buf = swab64p((u64 *)initrd_start);
202	if (!memcmp(&buf, "070701", 6) ||
203	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
204		unsigned long i;
205
206		pr_info("Byteswapped initrd detected\n");
207		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
208			swab64s((u64 *)i);
209	}
210#endif
211}
212
213static void __init finalize_initrd(void)
214{
215	unsigned long size = initrd_end - initrd_start;
216
217	if (size == 0) {
218		printk(KERN_INFO "Initrd not found or empty");
219		goto disable;
220	}
221	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
222		printk(KERN_ERR "Initrd extends beyond end of memory");
223		goto disable;
224	}
225
226	maybe_bswap_initrd();
227
228	memblock_reserve(__pa(initrd_start), size);
229	initrd_below_start_ok = 1;
230
231	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
232		initrd_start, size);
233	return;
234disable:
235	printk(KERN_CONT " - disabling initrd\n");
236	initrd_start = 0;
237	initrd_end = 0;
238}
239
240#else  /* !CONFIG_BLK_DEV_INITRD */
241
242static unsigned long __init init_initrd(void)
243{
244	return 0;
245}
246
247#define finalize_initrd()	do {} while (0)
248
249#endif
250
251/*
 252 * Initialize the bootmem allocator. It also sets up initrd related data
253 * if needed.
254 */
255#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
256
257static void __init bootmem_init(void)
258{
259	init_initrd();
260	finalize_initrd();
261}
262
263#else  /* !CONFIG_SGI_IP27 */
264
265static void __init bootmem_init(void)
266{
267	phys_addr_t ramstart, ramend;
268	unsigned long start, end;
269	int i;
270
271	ramstart = memblock_start_of_DRAM();
272	ramend = memblock_end_of_DRAM();
273
274	/*
275	 * Sanity check any INITRD first. We don't take it into account
276	 * for bootmem setup initially, rely on the end-of-kernel-code
277	 * as our memory range starting point. Once bootmem is inited we
278	 * will reserve the area used for the initrd.
279	 */
280	init_initrd();
281
282	/* Reserve memory occupied by kernel. */
283	memblock_reserve(__pa_symbol(&_text),
284			__pa_symbol(&_end) - __pa_symbol(&_text));
285
286	/* max_low_pfn is not a number of pages but the end pfn of low mem */
287
288#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
289	ARCH_PFN_OFFSET = PFN_UP(ramstart);
290#else
291	/*
292	 * Reserve any memory between the start of RAM and PHYS_OFFSET
293	 */
294	if (ramstart > PHYS_OFFSET)
295		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
296
297	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
298		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
299			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
300			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
301	}
302#endif
303
304	min_low_pfn = ARCH_PFN_OFFSET;
305	max_pfn = PFN_DOWN(ramend);
306	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
307		/*
308		 * Skip highmem here so we get an accurate max_low_pfn if low
309		 * memory stops short of high memory.
310		 * If the region overlaps HIGHMEM_START, end is clipped so
311		 * max_pfn excludes the highmem portion.
312		 */
313		if (start >= PFN_DOWN(HIGHMEM_START))
314			continue;
315		if (end > PFN_DOWN(HIGHMEM_START))
316			end = PFN_DOWN(HIGHMEM_START);
317		if (end > max_low_pfn)
318			max_low_pfn = end;
319	}
320
321	if (min_low_pfn >= max_low_pfn)
322		panic("Incorrect memory mapping !!!");
323
324	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
325		max_low_pfn = PFN_DOWN(HIGHMEM_START);
326#ifdef CONFIG_HIGHMEM
327		highstart_pfn = max_low_pfn;
328		highend_pfn = max_pfn;
329#else
330		max_pfn = max_low_pfn;
331#endif
332	}
333
334	/*
335	 * Reserve initrd memory if needed.
336	 */
337	finalize_initrd();
338}
339
340#endif	/* CONFIG_SGI_IP27 */
341
342static int usermem __initdata;
343
344static int __init early_parse_mem(char *p)
345{
346	phys_addr_t start, size;
347
348	if (!p) {
349		pr_err("mem parameter is empty, do nothing\n");
350		return -EINVAL;
351	}
352
353	/*
354	 * If a user specifies memory size, we
355	 * blow away any automatically generated
356	 * size.
357	 */
358	if (usermem == 0) {
359		usermem = 1;
360		memblock_remove(memblock_start_of_DRAM(),
361			memblock_end_of_DRAM() - memblock_start_of_DRAM());
362	}
363	start = 0;
364	size = memparse(p, &p);
365	if (*p == '@')
366		start = memparse(p + 1, &p);
367
368	if (IS_ENABLED(CONFIG_NUMA))
369		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
370	else
371		memblock_add(start, size);
372
373	return 0;
374}
375early_param("mem", early_parse_mem);
376
377static int __init early_parse_memmap(char *p)
378{
379	char *oldp;
380	u64 start_at, mem_size;
381
382	if (!p)
383		return -EINVAL;
384
385	if (!strncmp(p, "exactmap", 8)) {
386		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
387		return 0;
388	}
389
390	oldp = p;
391	mem_size = memparse(p, &p);
392	if (p == oldp)
393		return -EINVAL;
394
395	if (*p == '@') {
396		start_at = memparse(p+1, &p);
397		memblock_add(start_at, mem_size);
398	} else if (*p == '#') {
399		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
400		return -EINVAL;
401	} else if (*p == '$') {
402		start_at = memparse(p+1, &p);
403		memblock_add(start_at, mem_size);
404		memblock_reserve(start_at, mem_size);
405	} else {
406		pr_err("\"memmap\" invalid format!\n");
407		return -EINVAL;
408	}
409
410	if (*p == '\0') {
411		usermem = 1;
412		return 0;
413	} else
414		return -EINVAL;
415}
416early_param("memmap", early_parse_memmap);
417
418static void __init mips_reserve_vmcore(void)
419{
420#ifdef CONFIG_PROC_VMCORE
421	phys_addr_t start, end;
422	u64 i;
423
424	if (!elfcorehdr_size) {
425		for_each_mem_range(i, &start, &end) {
426			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
427				/*
428				 * Reserve from the elf core header to the end of
429				 * the memory segment, that should all be kdump
430				 * reserved memory.
431				 */
432				elfcorehdr_size = end - elfcorehdr_addr;
433				break;
434			}
435		}
436	}
437
438	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
439		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);
440
441	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
442#endif
443}
444
445/* 64M alignment for crash kernel regions */
446#define CRASH_ALIGN	SZ_64M
447#define CRASH_ADDR_MAX	SZ_512M
448
449static void __init mips_parse_crashkernel(void)
450{
451	unsigned long long total_mem;
452	unsigned long long crash_size, crash_base;
453	int ret;
454
455	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
456		return;
457
458	total_mem = memblock_phys_mem_size();
459	ret = parse_crashkernel(boot_command_line, total_mem,
460				&crash_size, &crash_base,
461				NULL, NULL);
462	if (ret != 0 || crash_size <= 0)
463		return;
464
465	if (crash_base <= 0) {
466		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
467						       CRASH_ALIGN,
468						       CRASH_ADDR_MAX);
469		if (!crash_base) {
470			pr_warn("crashkernel reservation failed - No suitable area found.\n");
471			return;
472		}
473	} else {
474		unsigned long long start;
475
476		start = memblock_phys_alloc_range(crash_size, 1,
477						  crash_base,
478						  crash_base + crash_size);
479		if (start != crash_base) {
480			pr_warn("Invalid memory region reserved for crash kernel\n");
481			return;
482		}
483	}
484
485	crashk_res.start = crash_base;
486	crashk_res.end	 = crash_base + crash_size - 1;
487}
488
489static void __init request_crashkernel(struct resource *res)
490{
491	int ret;
492
493	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
494		return;
495
496	if (crashk_res.start == crashk_res.end)
497		return;
498
499	ret = request_resource(res, &crashk_res);
500	if (!ret)
501		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
502			(unsigned long)(resource_size(&crashk_res) >> 20),
503			(unsigned long)(crashk_res.start  >> 20));
504}
505
506static void __init check_kernel_sections_mem(void)
507{
508	phys_addr_t start = __pa_symbol(&_text);
509	phys_addr_t size = __pa_symbol(&_end) - start;
510
511	if (!memblock_is_region_memory(start, size)) {
512		pr_info("Kernel sections are not in the memory maps\n");
513		memblock_add(start, size);
514	}
515}
516
517static void __init bootcmdline_append(const char *s, size_t max)
518{
519	if (!s[0] || !max)
520		return;
521
522	if (boot_command_line[0])
523		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
524
525	strlcat(boot_command_line, s, max);
526}
527
528#ifdef CONFIG_OF_EARLY_FLATTREE
529
530static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
531					  int depth, void *data)
532{
533	bool *dt_bootargs = data;
534	const char *p;
535	int l;
536
537	if (depth != 1 || !data ||
538	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
539		return 0;
540
541	p = of_get_flat_dt_prop(node, "bootargs", &l);
542	if (p != NULL && l > 0) {
543		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
544		*dt_bootargs = true;
545	}
546
547	return 1;
548}
549
550#endif /* CONFIG_OF_EARLY_FLATTREE */
551
552static void __init bootcmdline_init(void)
553{
554	bool dt_bootargs = false;
555
556	/*
557	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
558	 * trivial - we simply use the built-in command line unconditionally &
559	 * unmodified.
560	 */
561	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
562		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
563		return;
564	}
565
566	/*
567	 * If the user specified a built-in command line &
568	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
569	 * prepended to arguments from the bootloader or DT so we'll copy them
570	 * to the start of boot_command_line here. Otherwise, empty
571	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
572	 */
573	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
574		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
575	else
576		boot_command_line[0] = 0;
577
578#ifdef CONFIG_OF_EARLY_FLATTREE
579	/*
580	 * If we're configured to take boot arguments from DT, look for those
581	 * now.
582	 */
583	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
584	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
585		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
586#endif
587
588	/*
589	 * If we didn't get any arguments from DT (regardless of whether that's
590	 * because we weren't configured to look for them, or because we looked
591	 * & found none) then we'll take arguments from the bootloader.
592	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
593	 * the bootloader.
594	 */
595	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
596		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
597
598	/*
599	 * If the user specified a built-in command line & we didn't already
600	 * prepend it, we append it to boot_command_line here.
601	 */
602	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
603	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
604		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
605}
606
607/*
608 * arch_mem_init - initialize memory management subsystem
609 *
610 *  o plat_mem_setup() detects the memory configuration and will record detected
611 *    memory areas using memblock_add.
612 *
613 * At this stage the memory configuration of the system is known to the
614 * kernel but generic memory management system is still entirely uninitialized.
615 *
616 *  o bootmem_init()
617 *  o sparse_init()
618 *  o paging_init()
619 *  o dma_contiguous_reserve()
620 *
621 * At this stage the bootmem allocator is ready to use.
622 *
623 * NOTE: historically plat_mem_setup did the entire platform initialization.
624 *	 This was rather impractical because it meant plat_mem_setup had to
625 * get away without any kind of memory allocator.  To keep old code from
626 * breaking plat_setup was just renamed to plat_mem_setup and a second platform
627 * initialization hook for anything else was introduced.
628 */
629static void __init arch_mem_init(char **cmdline_p)
630{
631	/* call board setup routine */
632	plat_mem_setup();
633	memblock_set_bottom_up(true);
634
635	bootcmdline_init();
636	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
637	*cmdline_p = command_line;
638
639	parse_early_param();
640
641	if (usermem)
642		pr_info("User-defined physical RAM map overwrite\n");
643
644	check_kernel_sections_mem();
645
646	early_init_fdt_reserve_self();
647	early_init_fdt_scan_reserved_mem();
648
649#ifndef CONFIG_NUMA
650	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
651#endif
652	bootmem_init();
653
654	/*
655	 * Prevent memblock from allocating high memory.
656	 * This cannot be done before max_low_pfn is detected, so up
 657	 * to this point it is only possible to reserve physical memory
658	 * with memblock_reserve; memblock_alloc* can be used
659	 * only after this point
660	 */
661	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
662
663	mips_reserve_vmcore();
664
665	mips_parse_crashkernel();
666	device_tree_init();
667
668	/*
 669	 * In order to reduce the possibility of a kernel panic when failing to
 670	 * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
 671	 * as little low memory as possible before plat_swiotlb_setup(), so
 672	 * make sparse_init() use top-down allocation.
673	 */
674	memblock_set_bottom_up(false);
675	sparse_init();
676	memblock_set_bottom_up(true);
677
678	plat_swiotlb_setup();
679
680	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
681
682	/* Reserve for hibernation. */
683	memblock_reserve(__pa_symbol(&__nosave_begin),
684		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
685
686	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
687}
688
689static void __init resource_init(void)
690{
691	phys_addr_t start, end;
692	u64 i;
693
694	if (UNCAC_BASE != IO_BASE)
695		return;
696
697	code_resource.start = __pa_symbol(&_text);
698	code_resource.end = __pa_symbol(&_etext) - 1;
699	data_resource.start = __pa_symbol(&_etext);
700	data_resource.end = __pa_symbol(&_edata) - 1;
701	bss_resource.start = __pa_symbol(&__bss_start);
702	bss_resource.end = __pa_symbol(&__bss_stop) - 1;
703
704	for_each_mem_range(i, &start, &end) {
705		struct resource *res;
706
707		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
708		if (!res)
709			panic("%s: Failed to allocate %zu bytes\n", __func__,
710			      sizeof(struct resource));
711
712		res->start = start;
713		/*
714		 * In memblock, end points to the first byte after the
 715		 * range while in resources, end points to the last byte in
716		 * the range.
717		 */
718		res->end = end - 1;
719		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
720		res->name = "System RAM";
721
722		request_resource(&iomem_resource, res);
723
724		/*
725		 *  We don't know which RAM region contains kernel data,
726		 *  so we try it repeatedly and let the resource manager
727		 *  test it.
728		 */
729		request_resource(res, &code_resource);
730		request_resource(res, &data_resource);
731		request_resource(res, &bss_resource);
732		request_crashkernel(res);
733	}
734}
735
736#ifdef CONFIG_SMP
737static void __init prefill_possible_map(void)
738{
739	int i, possible = num_possible_cpus();
740
741	if (possible > nr_cpu_ids)
742		possible = nr_cpu_ids;
743
744	for (i = 0; i < possible; i++)
745		set_cpu_possible(i, true);
746	for (; i < NR_CPUS; i++)
747		set_cpu_possible(i, false);
748
749	set_nr_cpu_ids(possible);
750}
751#else
752static inline void prefill_possible_map(void) {}
753#endif
754
755static void __init setup_rng_seed(void)
756{
757	char *rng_seed_hex = fw_getenv("rngseed");
758	u8 rng_seed[512];
759	size_t len;
760
761	if (!rng_seed_hex)
762		return;
763
764	len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
765	if (hex2bin(rng_seed, rng_seed_hex, len))
766		return;
767
768	add_bootloader_randomness(rng_seed, len);
769	memzero_explicit(rng_seed, len);
770	memzero_explicit(rng_seed_hex, len * 2);
771}
772
773void __init setup_arch(char **cmdline_p)
774{
775	cpu_probe();
776	mips_cm_probe();
777	prom_init();
778
779	setup_early_fdc_console();
780#ifdef CONFIG_EARLY_PRINTK
781	setup_early_printk();
782#endif
783	cpu_report();
784	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
785		check_bugs64_early();
786
787	arch_mem_init(cmdline_p);
788	dmi_setup();
789
790	resource_init();
791	plat_smp_setup();
792	prefill_possible_map();
793
794	cpu_cache_init();
795	paging_init();
796
797	memblock_dump_all();
798
799	setup_rng_seed();
800}
801
802unsigned long kernelsp[NR_CPUS];
803unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
804
805#ifdef CONFIG_DEBUG_FS
806struct dentry *mips_debugfs_dir;
807static int __init debugfs_mips(void)
808{
809	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
810	return 0;
811}
812arch_initcall(debugfs_mips);
813#endif
814
815#ifdef CONFIG_DMA_NONCOHERENT
816static int __init setcoherentio(char *str)
817{
818	dma_default_coherent = true;
819	pr_info("Hardware DMA cache coherency (command line)\n");
820	return 0;
821}
822early_param("coherentio", setcoherentio);
823
824static int __init setnocoherentio(char *str)
825{
826	dma_default_coherent = false;
827	pr_info("Software DMA cache coherency (command line)\n");
828	return 0;
829}
830early_param("nocoherentio", setnocoherentio);
831#endif
832
833void __init arch_cpu_finalize_init(void)
834{
835	unsigned int cpu = smp_processor_id();
836
837	cpu_data[cpu].udelay_val = loops_per_jiffy;
838	check_bugs32();
839
840	if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
841		check_bugs64();
842}
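
Relative to the v5.14.15 version, setup_arch() in the v6.13.7 listing above ends by calling setup_rng_seed(), which reads a hex "rngseed" string from the firmware environment, decodes it with hex2bin() and feeds the bytes to add_bootloader_randomness(). The following hypothetical userspace sketch only illustrates that hex-decoding step; hex2bin_sketch() is a stand-in written for this note, not the kernel's hex2bin(), and the firmware variable is simulated with a string constant.

/* Hypothetical userspace sketch of the decoding step in setup_rng_seed(). */
#include <stdio.h>
#include <string.h>
#include <ctype.h>
#include <stddef.h>

static int hexval(char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	c = (char)tolower((unsigned char)c);
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	return -1;
}

/* Decode len bytes (2 * len hex digits); 0 on success, -1 on bad input. */
static int hex2bin_sketch(unsigned char *dst, const char *src, size_t len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		int hi = hexval(src[2 * i]);
		int lo = hexval(src[2 * i + 1]);

		if (hi < 0 || lo < 0)
			return -1;
		dst[i] = (unsigned char)((hi << 4) | lo);
	}
	return 0;
}

int main(void)
{
	/* Stand-in for fw_getenv("rngseed"): 8 seed bytes as hex text. */
	const char *rng_seed_hex = "0123456789abcdef";
	unsigned char rng_seed[512];
	size_t len = strlen(rng_seed_hex) / 2;

	if (len > sizeof(rng_seed))
		len = sizeof(rng_seed);

	if (hex2bin_sketch(rng_seed, rng_seed_hex, len) == 0)
		printf("would pass %zu bytes to add_bootloader_randomness()\n", len);
	return 0;
}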