v5.4
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1995 Linus Torvalds
  7 * Copyright (C) 1995 Waldorf Electronics
  8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
  9 * Copyright (C) 1996 Stoned Elipot
 10 * Copyright (C) 1999 Silicon Graphics, Inc.
 11 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 12 */
 13#include <linux/init.h>
 14#include <linux/ioport.h>
 15#include <linux/export.h>
 16#include <linux/screen_info.h>
 17#include <linux/memblock.h>
 18#include <linux/initrd.h>
 19#include <linux/root_dev.h>
 20#include <linux/highmem.h>
 21#include <linux/console.h>
 22#include <linux/pfn.h>
 23#include <linux/debugfs.h>
 24#include <linux/kexec.h>
 25#include <linux/sizes.h>
 26#include <linux/device.h>
 27#include <linux/dma-contiguous.h>
 28#include <linux/decompress/generic.h>
 29#include <linux/of_fdt.h>
 30#include <linux/of_reserved_mem.h>
 31
 32#include <asm/addrspace.h>
 33#include <asm/bootinfo.h>
 34#include <asm/bugs.h>
 35#include <asm/cache.h>
 36#include <asm/cdmm.h>
 37#include <asm/cpu.h>
 38#include <asm/debug.h>
 39#include <asm/dma-coherence.h>
 40#include <asm/sections.h>
 41#include <asm/setup.h>
 42#include <asm/smp-ops.h>
 43#include <asm/prom.h>
 44
 45#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
 46const char __section(.appended_dtb) __appended_dtb[0x100000];
 47#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 48
 49struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
 50
 51EXPORT_SYMBOL(cpu_data);
 52
 53#ifdef CONFIG_VT
 54struct screen_info screen_info;
 55#endif
 56
 57/*
 58 * Setup information
 59 *
 60 * These are initialized so they are in the .data section
 61 */
 62unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
 63
 64EXPORT_SYMBOL(mips_machtype);
 65
 66static char __initdata command_line[COMMAND_LINE_SIZE];
 67char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
 68
 69#ifdef CONFIG_CMDLINE_BOOL
 70static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 71#endif
 72
 73/*
 74 * mips_io_port_base is the beginning of the address space to which x86-style
 75 * I/O ports are mapped.
 76 */
 77unsigned long mips_io_port_base = -1;
 78EXPORT_SYMBOL(mips_io_port_base);
 79
 80static struct resource code_resource = { .name = "Kernel code", };
 81static struct resource data_resource = { .name = "Kernel data", };
 82static struct resource bss_resource = { .name = "Kernel bss", };
 83
 84static void *detect_magic __initdata = detect_memory_region;
 85
 86#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
 87unsigned long ARCH_PFN_OFFSET;
 88EXPORT_SYMBOL(ARCH_PFN_OFFSET);
 89#endif
 90
 91void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 92{
 93	/*
 94	 * Note: This function only exists for historical reasons;
 95	 * new code should use memblock_add or memblock_add_node instead.
 96	 */
 97
 98	/*
 99	 * If the region reaches the top of the physical address space, adjust
100	 * the size slightly so that (start + size) doesn't overflow
101	 */
102	if (start + size - 1 == PHYS_ADDR_MAX)
103		--size;
104
105	/* Sanity check */
106	if (start + size < start) {
107		pr_warn("Trying to add an invalid memory region, skipped\n");
108		return;
109	}
110
111	if (start < PHYS_OFFSET)
112		return;
113
114	memblock_add(start, size);
115	/* Reserve any memory except the ordinary RAM ranges. */
116	switch (type) {
117	case BOOT_MEM_RAM:
118		break;
119
120	case BOOT_MEM_NOMAP: /* Discard the range from the system. */
121		memblock_remove(start, size);
122		break;
123
124	default: /* Reserve the rest of the memory types at boot time */
125		memblock_reserve(start, size);
126		break;
127	}
128}
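/*
 * Illustrative sketch, not part of this file: the modern memblock calls that
 * the note above recommends over add_memory_region(). The addresses and
 * sizes here are made up.
 */
#if 0
static void __init example_register_ram(void)
{
	/* Ordinary RAM: just add it to the memory map. */
	memblock_add(0x00000000, SZ_256M);

	/* A firmware-owned range: add it, then keep it reserved. */
	memblock_add(0x0f000000, SZ_16M);
	memblock_reserve(0x0f000000, SZ_16M);
}
#endif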
129
130void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
131{
132	void *dm = &detect_magic;
133	phys_addr_t size;
134
135	for (size = sz_min; size < sz_max; size <<= 1) {
136		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
137			break;
138	}
139
140	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
141		((unsigned long long) size) / SZ_1M,
142		(unsigned long long) start,
143		((unsigned long long) sz_min) / SZ_1M,
144		((unsigned long long) sz_max) / SZ_1M);
145
146	add_memory_region(start, size, BOOT_MEM_RAM);
147}
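/*
 * How the probe above works (editorial note): detect_magic holds a known
 * pointer value, and on boards whose address decoding wraps around the
 * installed RAM, the same bytes reappear at every multiple of the RAM size.
 * The first power-of-two offset at which the magic compares equal is taken
 * as the memory size. A hypothetical caller, with made-up probe bounds:
 */
#if 0
void __init plat_mem_setup(void)
{
	/* Probe for between 32MB and 512MB of aliased RAM at address 0. */
	detect_memory_region(0, SZ_32M, SZ_512M);
}
#endif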
148
149/*
150 * Manage initrd
151 */
152#ifdef CONFIG_BLK_DEV_INITRD
153
154static int __init rd_start_early(char *p)
155{
156	unsigned long start = memparse(p, &p);
157
158#ifdef CONFIG_64BIT
159	/* Guess if the sign extension was forgotten by bootloader */
160	if (start < XKPHYS)
161		start = (int)start;
162#endif
163	initrd_start = start;
164	initrd_end += start;
165	return 0;
166}
167early_param("rd_start", rd_start_early);
168
169static int __init rd_size_early(char *p)
170{
171	initrd_end += memparse(p, &p);
172	return 0;
173}
174early_param("rd_size", rd_size_early);
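/*
 * Example (illustrative): a bootloader that cannot fill in initrd_start and
 * initrd_end directly can describe the ramdisk on the command line, e.g.
 * with made-up values:
 *
 *	rd_start=0x84000000 rd_size=8M
 *
 * rd_start_early() and rd_size_early() above then derive the globals.
 */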
175
 176/* Returns the next free pfn after the initrd. */
177static unsigned long __init init_initrd(void)
178{
179	unsigned long end;
180
181	/*
182	 * Board specific code or command line parser should have
183	 * already set up initrd_start and initrd_end. In these cases
 184	 * perform sanity checks and use them if all looks good.
185	 */
186	if (!initrd_start || initrd_end <= initrd_start)
187		goto disable;
188
189	if (initrd_start & ~PAGE_MASK) {
190		pr_err("initrd start must be page aligned\n");
191		goto disable;
192	}
193	if (initrd_start < PAGE_OFFSET) {
194		pr_err("initrd start < PAGE_OFFSET\n");
195		goto disable;
196	}
197
198	/*
 199	 * Sanitize initrd addresses. For example, firmware can't
 200	 * guess whether it needs to pass them as 64-bit values
 201	 * when the kernel has been built purely 32-bit. We also
 202	 * need to switch from KSEG0 to XKPHYS addresses now, so
 203	 * that the code can safely use __pa().
204	 */
205	end = __pa(initrd_end);
206	initrd_end = (unsigned long)__va(end);
207	initrd_start = (unsigned long)__va(__pa(initrd_start));
208
209	ROOT_DEV = Root_RAM0;
210	return PFN_UP(end);
211disable:
212	initrd_start = 0;
213	initrd_end = 0;
214	return 0;
215}
216
217/* In some conditions (e.g. big endian bootloader with a little endian
218   kernel), the initrd might appear byte swapped.  Try to detect this and
219   byte swap it if needed.  */
220static void __init maybe_bswap_initrd(void)
221{
222#if defined(CONFIG_CPU_CAVIUM_OCTEON)
223	u64 buf;
224
225	/* Check for CPIO signature */
226	if (!memcmp((void *)initrd_start, "070701", 6))
227		return;
228
229	/* Check for compressed initrd */
230	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
231		return;
232
233	/* Try again with a byte swapped header */
234	buf = swab64p((u64 *)initrd_start);
235	if (!memcmp(&buf, "070701", 6) ||
236	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
237		unsigned long i;
238
239		pr_info("Byteswapped initrd detected\n");
240		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
241			swab64s((u64 *)i);
242	}
243#endif
244}
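/*
 * Note: "070701" is the magic number of the "newc" cpio archive format used
 * by initramfs images, so finding it (or a recognized compression header) at
 * initrd_start means the image is already in the correct byte order; the
 * whole initrd is swapped only when the raw checks fail but the byte-swapped
 * header matches.
 */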
245
246static void __init finalize_initrd(void)
247{
248	unsigned long size = initrd_end - initrd_start;
249
250	if (size == 0) {
251		printk(KERN_INFO "Initrd not found or empty");
252		goto disable;
253	}
254	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
255		printk(KERN_ERR "Initrd extends beyond end of memory");
256		goto disable;
257	}
258
259	maybe_bswap_initrd();
260
261	memblock_reserve(__pa(initrd_start), size);
262	initrd_below_start_ok = 1;
263
264	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
265		initrd_start, size);
266	return;
267disable:
268	printk(KERN_CONT " - disabling initrd\n");
269	initrd_start = 0;
270	initrd_end = 0;
271}
272
273#else  /* !CONFIG_BLK_DEV_INITRD */
274
275static unsigned long __init init_initrd(void)
276{
277	return 0;
278}
279
280#define finalize_initrd()	do {} while (0)
281
282#endif
283
284/*
 285 * Initialize the bootmem allocator. It also sets up initrd-related data
286 * if needed.
287 */
288#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
289
290static void __init bootmem_init(void)
291{
292	init_initrd();
293	finalize_initrd();
294}
295
296#else  /* !CONFIG_SGI_IP27 */
297
298static void __init bootmem_init(void)
299{
300	struct memblock_region *mem;
301	phys_addr_t ramstart, ramend;
302
303	ramstart = memblock_start_of_DRAM();
304	ramend = memblock_end_of_DRAM();
305
306	/*
307	 * Sanity check any INITRD first. We don't take it into account
 308	 * for bootmem setup initially, relying on the end of the kernel
 309	 * code as our memory range starting point. Once bootmem is
 310	 * initialized we will reserve the area used for the initrd.
311	 */
312	init_initrd();
313
314	/* Reserve memory occupied by kernel. */
315	memblock_reserve(__pa_symbol(&_text),
316			__pa_symbol(&_end) - __pa_symbol(&_text));
317
318	/* max_low_pfn is not a number of pages but the end pfn of low mem */
319
320#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
321	ARCH_PFN_OFFSET = PFN_UP(ramstart);
322#else
323	/*
324	 * Reserve any memory between the start of RAM and PHYS_OFFSET
325	 */
326	if (ramstart > PHYS_OFFSET)
327		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
328
329	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
330		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
331			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
332			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
333	}
334#endif
335
336	min_low_pfn = ARCH_PFN_OFFSET;
337	max_pfn = PFN_DOWN(ramend);
338	for_each_memblock(memory, mem) {
339		unsigned long start = memblock_region_memory_base_pfn(mem);
340		unsigned long end = memblock_region_memory_end_pfn(mem);
341
342		/*
343		 * Skip highmem here so we get an accurate max_low_pfn if low
344		 * memory stops short of high memory.
345		 * If the region overlaps HIGHMEM_START, end is clipped so
346		 * max_pfn excludes the highmem portion.
347		 */
348		if (memblock_is_nomap(mem))
349			continue;
350		if (start >= PFN_DOWN(HIGHMEM_START))
351			continue;
352		if (end > PFN_DOWN(HIGHMEM_START))
353			end = PFN_DOWN(HIGHMEM_START);
354		if (end > max_low_pfn)
355			max_low_pfn = end;
356	}
357
358	if (min_low_pfn >= max_low_pfn)
359		panic("Incorrect memory mapping !!!");
360
361	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
362#ifdef CONFIG_HIGHMEM
363		highstart_pfn = PFN_DOWN(HIGHMEM_START);
364		highend_pfn = max_pfn;
365#else
366		max_low_pfn = PFN_DOWN(HIGHMEM_START);
367		max_pfn = max_low_pfn;
368#endif
369	}
370
371
372	/*
 373	 * In any case, the memory regions added to memblock
 374	 * (highmem/lowmem, available/reserved, etc.) are considered
 375	 * present, so inform sparsemem about them.
376	 */
377	memblocks_present();
378
379	/*
380	 * Reserve initrd memory if needed.
381	 */
382	finalize_initrd();
383}
384
385#endif	/* CONFIG_SGI_IP27 */
386
387static int usermem __initdata;
388
389static int __init early_parse_mem(char *p)
390{
391	phys_addr_t start, size;
392
393	/*
394	 * If a user specifies memory size, we
395	 * blow away any automatically generated
396	 * size.
397	 */
398	if (usermem == 0) {
399		usermem = 1;
400		memblock_remove(memblock_start_of_DRAM(),
401			memblock_end_of_DRAM() - memblock_start_of_DRAM());
402	}
403	start = 0;
404	size = memparse(p, &p);
405	if (*p == '@')
406		start = memparse(p + 1, &p);
407
408	add_memory_region(start, size, BOOT_MEM_RAM);
409
410	return 0;
411}
412early_param("mem", early_parse_mem);
413
414static int __init early_parse_memmap(char *p)
415{
416	char *oldp;
417	u64 start_at, mem_size;
418
419	if (!p)
420		return -EINVAL;
421
422	if (!strncmp(p, "exactmap", 8)) {
423		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
424		return 0;
425	}
426
427	oldp = p;
428	mem_size = memparse(p, &p);
429	if (p == oldp)
430		return -EINVAL;
431
432	if (*p == '@') {
433		start_at = memparse(p+1, &p);
434		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
435	} else if (*p == '#') {
436		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
437		return -EINVAL;
438	} else if (*p == '$') {
439		start_at = memparse(p+1, &p);
440		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
441	} else {
442		pr_err("\"memmap\" invalid format!\n");
443		return -EINVAL;
444	}
445
446	if (*p == '\0') {
447		usermem = 1;
448		return 0;
449	} else
450		return -EINVAL;
451}
452early_param("memmap", early_parse_memmap);
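/*
 * Example (illustrative) command-line uses of the two parameters parsed
 * above, with made-up addresses:
 *
 *	mem=128M@0x0		register 128MB of RAM at physical address 0
 *	memmap=16M@0x10000000	add 16MB of RAM at 256MB (BOOT_MEM_RAM)
 *	memmap=1M$0x1ff00000	reserve 1MB at ~511MB (BOOT_MEM_RESERVED)
 *
 * The first "mem=" argument also discards the firmware-provided memory map
 * (usermem is set to 1).
 */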
453
454#ifdef CONFIG_PROC_VMCORE
455unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
456static int __init early_parse_elfcorehdr(char *p)
457{
458	struct memblock_region *mem;
459
460	setup_elfcorehdr = memparse(p, &p);
461
462	 for_each_memblock(memory, mem) {
463		unsigned long start = mem->base;
464		unsigned long end = start + mem->size;
465		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
466			/*
467			 * Reserve from the elf core header to the end of
 468			 * the memory segment; that should all be kdump
469			 * reserved memory.
470			 */
471			setup_elfcorehdr_size = end - setup_elfcorehdr;
472			break;
473		}
474	}
475	/*
476	 * If we don't find it in the memory map, then we shouldn't
477	 * have to worry about it, as the new kernel won't use it.
478	 */
479	return 0;
480}
481early_param("elfcorehdr", early_parse_elfcorehdr);
482#endif
483
484#ifdef CONFIG_KEXEC
485static void __init mips_parse_crashkernel(void)
486{
487	unsigned long long total_mem;
488	unsigned long long crash_size, crash_base;
489	int ret;
490
491	total_mem = memblock_phys_mem_size();
492	ret = parse_crashkernel(boot_command_line, total_mem,
493				&crash_size, &crash_base);
494	if (ret != 0 || crash_size <= 0)
495		return;
496
497	if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
498		pr_warn("Invalid memory region reserved for crash kernel\n");
499		return;
500	}
501
502	crashk_res.start = crash_base;
503	crashk_res.end	 = crash_base + crash_size - 1;
504}
505
506static void __init request_crashkernel(struct resource *res)
507{
508	int ret;
509
510	if (crashk_res.start == crashk_res.end)
511		return;
512
513	ret = request_resource(res, &crashk_res);
514	if (!ret)
515		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
516			(unsigned long)((crashk_res.end -
517					 crashk_res.start + 1) >> 20),
518			(unsigned long)(crashk_res.start  >> 20));
519}
520#else /* !defined(CONFIG_KEXEC)		*/
521static void __init mips_parse_crashkernel(void)
522{
523}
524
525static void __init request_crashkernel(struct resource *res)
526{
527}
528#endif /* !defined(CONFIG_KEXEC)  */
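/*
 * Example (illustrative): mips_parse_crashkernel() consumes the standard
 * crashkernel= parameter, e.g.
 *
 *	crashkernel=64M@0x02000000
 *
 * which requests 64MB at physical 32MB for a kdump kernel; the region is
 * then reserved in arch_mem_init() and published via request_crashkernel().
 */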
529
530static void __init check_kernel_sections_mem(void)
531{
532	phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
533	phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;
534
535	if (!memblock_is_region_memory(start, size)) {
536		pr_info("Kernel sections are not in the memory maps\n");
537		memblock_add(start, size);
538	}
539}
540
541#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
542#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
543#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
544#define BUILTIN_EXTEND_WITH_PROM	\
545	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
546
547/*
548 * arch_mem_init - initialize memory management subsystem
549 *
550 *  o plat_mem_setup() detects the memory configuration and will record detected
551 *    memory areas using add_memory_region.
552 *
553 * At this stage the memory configuration of the system is known to the
 554 * kernel, but the generic memory management system is still entirely uninitialized.
555 *
556 *  o bootmem_init()
557 *  o sparse_init()
558 *  o paging_init()
559 *  o dma_contiguous_reserve()
560 *
561 * At this stage the bootmem allocator is ready to use.
562 *
563 * NOTE: historically plat_mem_setup did the entire platform initialization.
564 *	 This was rather impractical because it meant plat_mem_setup had to
 565 * get by without any kind of memory allocator.  To keep old code from
 566 * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
567 * initialization hook for anything else was introduced.
568 */
569static void __init arch_mem_init(char **cmdline_p)
570{
571	extern void plat_mem_setup(void);
572
573	/*
574	 * Initialize boot_command_line to an innocuous but non-empty string in
575	 * order to prevent early_init_dt_scan_chosen() from copying
576	 * CONFIG_CMDLINE into it without our knowledge. We handle
577	 * CONFIG_CMDLINE ourselves below & don't want to duplicate its
578	 * content because repeating arguments can be problematic.
579	 */
580	strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
581
582	/* call board setup routine */
583	plat_mem_setup();
584	memblock_set_bottom_up(true);
585
586#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
587	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
588#else
589	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
590	    (USE_DTB_CMDLINE && !boot_command_line[0]))
591		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
592
593	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
594		if (boot_command_line[0])
595			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
596		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
597	}
598
599#if defined(CONFIG_CMDLINE_BOOL)
600	if (builtin_cmdline[0]) {
601		if (boot_command_line[0])
602			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
603		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
604	}
605
606	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
607		if (boot_command_line[0])
608			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
609		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
610	}
611#endif
612#endif
613	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
614
615	*cmdline_p = command_line;
616
617	parse_early_param();
618
619	if (usermem)
620		pr_info("User-defined physical RAM map overwrite\n");
621
622	check_kernel_sections_mem();
623
624	early_init_fdt_reserve_self();
625	early_init_fdt_scan_reserved_mem();
626
627#ifndef CONFIG_NUMA
628	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
629#endif
630	bootmem_init();
631
632	/*
633	 * Prevent memblock from allocating high memory.
634	 * This cannot be done before max_low_pfn is detected, so up
 635	 * to this point it is only possible to reserve physical memory
 636	 * with memblock_reserve; memblock_alloc* can be used
 637	 * only after this point.
638	 */
639	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
640
641#ifdef CONFIG_PROC_VMCORE
642	if (setup_elfcorehdr && setup_elfcorehdr_size) {
643		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
644		       setup_elfcorehdr, setup_elfcorehdr_size);
645		memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
646	}
647#endif
648
649	mips_parse_crashkernel();
650#ifdef CONFIG_KEXEC
651	if (crashk_res.start != crashk_res.end)
652		memblock_reserve(crashk_res.start,
653				 crashk_res.end - crashk_res.start + 1);
654#endif
655	device_tree_init();
656	sparse_init();
657	plat_swiotlb_setup();
658
659	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
660
661	/* Reserve for hibernation. */
662	memblock_reserve(__pa_symbol(&__nosave_begin),
663		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
664
665	fdt_init_reserved_mem();
666
667	memblock_dump_all();
668
669	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
670}
671
672static void __init resource_init(void)
673{
674	struct memblock_region *region;
675
676	if (UNCAC_BASE != IO_BASE)
677		return;
678
679	code_resource.start = __pa_symbol(&_text);
680	code_resource.end = __pa_symbol(&_etext) - 1;
681	data_resource.start = __pa_symbol(&_etext);
682	data_resource.end = __pa_symbol(&_edata) - 1;
683	bss_resource.start = __pa_symbol(&__bss_start);
684	bss_resource.end = __pa_symbol(&__bss_stop) - 1;
685
686	for_each_memblock(memory, region) {
687		phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
688		phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
689		struct resource *res;
690
691		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
692		if (!res)
693			panic("%s: Failed to allocate %zu bytes\n", __func__,
694			      sizeof(struct resource));
695
696		res->start = start;
697		res->end = end;
698		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
699		res->name = "System RAM";
700
701		request_resource(&iomem_resource, res);
702
703		/*
704		 *  We don't know which RAM region contains kernel data,
705		 *  so we try it repeatedly and let the resource manager
706		 *  test it.
707		 */
708		request_resource(res, &code_resource);
709		request_resource(res, &data_resource);
710		request_resource(res, &bss_resource);
711		request_crashkernel(res);
712	}
713}
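/*
 * Illustrative effect (not part of this file): after resource_init() the
 * hierarchy built above shows up in /proc/iomem, roughly like the following,
 * with made-up addresses:
 *
 *	00000000-0fffffff : System RAM
 *	  00520000-0089ffff : Kernel code
 *	  008a0000-00a4ffff : Kernel data
 *	  00a50000-00b3ffff : Kernel bss
 */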
714
715#ifdef CONFIG_SMP
716static void __init prefill_possible_map(void)
717{
718	int i, possible = num_possible_cpus();
719
720	if (possible > nr_cpu_ids)
721		possible = nr_cpu_ids;
722
723	for (i = 0; i < possible; i++)
724		set_cpu_possible(i, true);
725	for (; i < NR_CPUS; i++)
726		set_cpu_possible(i, false);
727
728	nr_cpu_ids = possible;
729}
730#else
731static inline void prefill_possible_map(void) {}
732#endif
733
734void __init setup_arch(char **cmdline_p)
735{
736	cpu_probe();
737	mips_cm_probe();
738	prom_init();
739
740	setup_early_fdc_console();
741#ifdef CONFIG_EARLY_PRINTK
742	setup_early_printk();
743#endif
744	cpu_report();
745	check_bugs_early();
746
747#if defined(CONFIG_VT)
748#if defined(CONFIG_VGA_CONSOLE)
749	conswitchp = &vga_con;
750#elif defined(CONFIG_DUMMY_CONSOLE)
751	conswitchp = &dummy_con;
752#endif
753#endif
754
755	arch_mem_init(cmdline_p);
756
757	resource_init();
758	plat_smp_setup();
759	prefill_possible_map();
760
761	cpu_cache_init();
762	paging_init();
763}
764
765unsigned long kernelsp[NR_CPUS];
766unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
767
768#ifdef CONFIG_USE_OF
769unsigned long fw_passed_dtb;
770#endif
771
772#ifdef CONFIG_DEBUG_FS
773struct dentry *mips_debugfs_dir;
774static int __init debugfs_mips(void)
775{
776	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
777	return 0;
778}
779arch_initcall(debugfs_mips);
780#endif
781
782#ifdef CONFIG_DMA_MAYBE_COHERENT
783/* User defined DMA coherency from command line. */
784enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
785EXPORT_SYMBOL_GPL(coherentio);
786int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
787
788static int __init setcoherentio(char *str)
789{
790	coherentio = IO_COHERENCE_ENABLED;
791	pr_info("Hardware DMA cache coherency (command line)\n");
792	return 0;
793}
794early_param("coherentio", setcoherentio);
795
796static int __init setnocoherentio(char *str)
797{
798	coherentio = IO_COHERENCE_DISABLED;
799	pr_info("Software DMA cache coherency (command line)\n");
800	return 0;
801}
802early_param("nocoherentio", setnocoherentio);
803#endif
v5.9
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1995 Linus Torvalds
  7 * Copyright (C) 1995 Waldorf Electronics
  8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
  9 * Copyright (C) 1996 Stoned Elipot
 10 * Copyright (C) 1999 Silicon Graphics, Inc.
 11 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 12 */
 13#include <linux/init.h>
 14#include <linux/ioport.h>
 15#include <linux/export.h>
 16#include <linux/screen_info.h>
 17#include <linux/memblock.h>
 18#include <linux/initrd.h>
 19#include <linux/root_dev.h>
 20#include <linux/highmem.h>
 21#include <linux/console.h>
 22#include <linux/pfn.h>
 23#include <linux/debugfs.h>
 24#include <linux/kexec.h>
 25#include <linux/sizes.h>
 26#include <linux/device.h>
 27#include <linux/dma-contiguous.h>
 28#include <linux/decompress/generic.h>
 29#include <linux/of_fdt.h>
 30#include <linux/of_reserved_mem.h>
 31#include <linux/dmi.h>
 32
 33#include <asm/addrspace.h>
 34#include <asm/bootinfo.h>
 35#include <asm/bugs.h>
 36#include <asm/cache.h>
 37#include <asm/cdmm.h>
 38#include <asm/cpu.h>
 39#include <asm/debug.h>
 40#include <asm/dma-coherence.h>
 41#include <asm/sections.h>
 42#include <asm/setup.h>
 43#include <asm/smp-ops.h>
 44#include <asm/prom.h>
 45
 46#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
 47const char __section(.appended_dtb) __appended_dtb[0x100000];
 48#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 49
 50struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
 51
 52EXPORT_SYMBOL(cpu_data);
 53
 54#ifdef CONFIG_VT
 55struct screen_info screen_info;
 56#endif
 57
 58/*
 59 * Setup information
 60 *
 61 * These are initialized so they are in the .data section
 62 */
 63unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
 64
 65EXPORT_SYMBOL(mips_machtype);
 66
 67static char __initdata command_line[COMMAND_LINE_SIZE];
 68char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
 69
 70#ifdef CONFIG_CMDLINE_BOOL
 71static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
 72#else
 73static const char builtin_cmdline[] __initconst = "";
 74#endif
 75
 76/*
 77 * mips_io_port_base is the beginning of the address space to which x86-style
 78 * I/O ports are mapped.
 79 */
 80unsigned long mips_io_port_base = -1;
 81EXPORT_SYMBOL(mips_io_port_base);
 82
 83static struct resource code_resource = { .name = "Kernel code", };
 84static struct resource data_resource = { .name = "Kernel data", };
 85static struct resource bss_resource = { .name = "Kernel bss", };
 86
 87static void *detect_magic __initdata = detect_memory_region;
 88
 89#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
 90unsigned long ARCH_PFN_OFFSET;
 91EXPORT_SYMBOL(ARCH_PFN_OFFSET);
 92#endif
 93
 94void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 95{
 96	/*
 97	 * Note: This function only exists for historical reasons;
 98	 * new code should use memblock_add or memblock_add_node instead.
 99	 */
100
101	/*
102	 * If the region reaches the top of the physical address space, adjust
103	 * the size slightly so that (start + size) doesn't overflow
104	 */
105	if (start + size - 1 == PHYS_ADDR_MAX)
106		--size;
107
108	/* Sanity check */
109	if (start + size < start) {
110		pr_warn("Trying to add an invalid memory region, skipped\n");
111		return;
112	}
113
114	if (start < PHYS_OFFSET)
115		return;
116
117	memblock_add(start, size);
118	/* Reserve any memory except the ordinary RAM ranges. */
119	switch (type) {
120	case BOOT_MEM_RAM:
121		break;
122
123	case BOOT_MEM_NOMAP: /* Discard the range from the system. */
124		memblock_remove(start, size);
125		break;
126
127	default: /* Reserve the rest of the memory types at boot time */
128		memblock_reserve(start, size);
129		break;
130	}
131}
132
133void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
134{
135	void *dm = &detect_magic;
136	phys_addr_t size;
137
138	for (size = sz_min; size < sz_max; size <<= 1) {
139		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
140			break;
141	}
142
143	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
144		((unsigned long long) size) / SZ_1M,
145		(unsigned long long) start,
146		((unsigned long long) sz_min) / SZ_1M,
147		((unsigned long long) sz_max) / SZ_1M);
148
149	add_memory_region(start, size, BOOT_MEM_RAM);
150}
151
152/*
153 * Manage initrd
154 */
155#ifdef CONFIG_BLK_DEV_INITRD
156
157static int __init rd_start_early(char *p)
158{
159	unsigned long start = memparse(p, &p);
160
161#ifdef CONFIG_64BIT
162	/* Guess if the sign extension was forgotten by bootloader */
163	if (start < XKPHYS)
164		start = (int)start;
165#endif
166	initrd_start = start;
167	initrd_end += start;
168	return 0;
169}
170early_param("rd_start", rd_start_early);
171
172static int __init rd_size_early(char *p)
173{
174	initrd_end += memparse(p, &p);
175	return 0;
176}
177early_param("rd_size", rd_size_early);
178
 179/* Returns the next free pfn after the initrd. */
180static unsigned long __init init_initrd(void)
181{
182	unsigned long end;
183
184	/*
185	 * Board specific code or command line parser should have
186	 * already set up initrd_start and initrd_end. In these cases
 187	 * perform sanity checks and use them if all looks good.
188	 */
189	if (!initrd_start || initrd_end <= initrd_start)
190		goto disable;
191
192	if (initrd_start & ~PAGE_MASK) {
193		pr_err("initrd start must be page aligned\n");
194		goto disable;
195	}
196	if (initrd_start < PAGE_OFFSET) {
197		pr_err("initrd start < PAGE_OFFSET\n");
198		goto disable;
199	}
200
201	/*
 202	 * Sanitize initrd addresses. For example, firmware can't
 203	 * guess whether it needs to pass them as 64-bit values
 204	 * when the kernel has been built purely 32-bit. We also
 205	 * need to switch from KSEG0 to XKPHYS addresses now, so
 206	 * that the code can safely use __pa().
207	 */
208	end = __pa(initrd_end);
209	initrd_end = (unsigned long)__va(end);
210	initrd_start = (unsigned long)__va(__pa(initrd_start));
211
212	ROOT_DEV = Root_RAM0;
213	return PFN_UP(end);
214disable:
215	initrd_start = 0;
216	initrd_end = 0;
217	return 0;
218}
219
220/* In some conditions (e.g. big endian bootloader with a little endian
221   kernel), the initrd might appear byte swapped.  Try to detect this and
222   byte swap it if needed.  */
223static void __init maybe_bswap_initrd(void)
224{
225#if defined(CONFIG_CPU_CAVIUM_OCTEON)
226	u64 buf;
227
228	/* Check for CPIO signature */
229	if (!memcmp((void *)initrd_start, "070701", 6))
230		return;
231
232	/* Check for compressed initrd */
233	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
234		return;
235
236	/* Try again with a byte swapped header */
237	buf = swab64p((u64 *)initrd_start);
238	if (!memcmp(&buf, "070701", 6) ||
239	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
240		unsigned long i;
241
242		pr_info("Byteswapped initrd detected\n");
243		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
244			swab64s((u64 *)i);
245	}
246#endif
247}
248
249static void __init finalize_initrd(void)
250{
251	unsigned long size = initrd_end - initrd_start;
252
253	if (size == 0) {
254		printk(KERN_INFO "Initrd not found or empty");
255		goto disable;
256	}
257	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
258		printk(KERN_ERR "Initrd extends beyond end of memory");
259		goto disable;
260	}
261
262	maybe_bswap_initrd();
263
264	memblock_reserve(__pa(initrd_start), size);
265	initrd_below_start_ok = 1;
266
267	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
268		initrd_start, size);
269	return;
270disable:
271	printk(KERN_CONT " - disabling initrd\n");
272	initrd_start = 0;
273	initrd_end = 0;
274}
275
276#else  /* !CONFIG_BLK_DEV_INITRD */
277
278static unsigned long __init init_initrd(void)
279{
280	return 0;
281}
282
283#define finalize_initrd()	do {} while (0)
284
285#endif
286
287/*
 288 * Initialize the bootmem allocator. It also sets up initrd-related data
289 * if needed.
290 */
291#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
292
293static void __init bootmem_init(void)
294{
295	init_initrd();
296	finalize_initrd();
297}
298
299#else  /* !CONFIG_SGI_IP27 */
300
301static void __init bootmem_init(void)
302{
303	struct memblock_region *mem;
304	phys_addr_t ramstart, ramend;
305
306	ramstart = memblock_start_of_DRAM();
307	ramend = memblock_end_of_DRAM();
308
309	/*
310	 * Sanity check any INITRD first. We don't take it into account
 311	 * for bootmem setup initially, relying on the end of the kernel
 312	 * code as our memory range starting point. Once bootmem is
 313	 * initialized we will reserve the area used for the initrd.
314	 */
315	init_initrd();
316
317	/* Reserve memory occupied by kernel. */
318	memblock_reserve(__pa_symbol(&_text),
319			__pa_symbol(&_end) - __pa_symbol(&_text));
320
321	/* max_low_pfn is not a number of pages but the end pfn of low mem */
322
323#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
324	ARCH_PFN_OFFSET = PFN_UP(ramstart);
325#else
326	/*
327	 * Reserve any memory between the start of RAM and PHYS_OFFSET
328	 */
329	if (ramstart > PHYS_OFFSET)
330		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
331
332	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
333		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
334			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
335			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
336	}
337#endif
338
339	min_low_pfn = ARCH_PFN_OFFSET;
340	max_pfn = PFN_DOWN(ramend);
341	for_each_memblock(memory, mem) {
342		unsigned long start = memblock_region_memory_base_pfn(mem);
343		unsigned long end = memblock_region_memory_end_pfn(mem);
344
345		/*
346		 * Skip highmem here so we get an accurate max_low_pfn if low
347		 * memory stops short of high memory.
348		 * If the region overlaps HIGHMEM_START, end is clipped so
349		 * max_pfn excludes the highmem portion.
350		 */
351		if (memblock_is_nomap(mem))
352			continue;
353		if (start >= PFN_DOWN(HIGHMEM_START))
354			continue;
355		if (end > PFN_DOWN(HIGHMEM_START))
356			end = PFN_DOWN(HIGHMEM_START);
357		if (end > max_low_pfn)
358			max_low_pfn = end;
359	}
360
361	if (min_low_pfn >= max_low_pfn)
362		panic("Incorrect memory mapping !!!");
363
364	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
365#ifdef CONFIG_HIGHMEM
366		highstart_pfn = PFN_DOWN(HIGHMEM_START);
367		highend_pfn = max_pfn;
368#else
369		max_low_pfn = PFN_DOWN(HIGHMEM_START);
370		max_pfn = max_low_pfn;
371#endif
372	}
373
374	/*
375	 * Reserve initrd memory if needed.
376	 */
377	finalize_initrd();
378}
379
380#endif	/* CONFIG_SGI_IP27 */
381
382static int usermem __initdata;
383
384static int __init early_parse_mem(char *p)
385{
386	phys_addr_t start, size;
387
388	/*
389	 * If a user specifies memory size, we
390	 * blow away any automatically generated
391	 * size.
392	 */
393	if (usermem == 0) {
394		usermem = 1;
395		memblock_remove(memblock_start_of_DRAM(),
396			memblock_end_of_DRAM() - memblock_start_of_DRAM());
397	}
398	start = 0;
399	size = memparse(p, &p);
400	if (*p == '@')
401		start = memparse(p + 1, &p);
402
403	add_memory_region(start, size, BOOT_MEM_RAM);
404
405	return 0;
406}
407early_param("mem", early_parse_mem);
408
409static int __init early_parse_memmap(char *p)
410{
411	char *oldp;
412	u64 start_at, mem_size;
413
414	if (!p)
415		return -EINVAL;
416
417	if (!strncmp(p, "exactmap", 8)) {
418		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
419		return 0;
420	}
421
422	oldp = p;
423	mem_size = memparse(p, &p);
424	if (p == oldp)
425		return -EINVAL;
426
427	if (*p == '@') {
428		start_at = memparse(p+1, &p);
429		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
430	} else if (*p == '#') {
431		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
432		return -EINVAL;
433	} else if (*p == '$') {
434		start_at = memparse(p+1, &p);
435		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
436	} else {
437		pr_err("\"memmap\" invalid format!\n");
438		return -EINVAL;
439	}
440
441	if (*p == '\0') {
442		usermem = 1;
443		return 0;
444	} else
445		return -EINVAL;
446}
447early_param("memmap", early_parse_memmap);
448
449#ifdef CONFIG_PROC_VMCORE
450unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
451static int __init early_parse_elfcorehdr(char *p)
452{
453	struct memblock_region *mem;
454
455	setup_elfcorehdr = memparse(p, &p);
456
457	 for_each_memblock(memory, mem) {
458		unsigned long start = mem->base;
459		unsigned long end = start + mem->size;
460		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
461			/*
462			 * Reserve from the elf core header to the end of
 463			 * the memory segment; that should all be kdump
464			 * reserved memory.
465			 */
466			setup_elfcorehdr_size = end - setup_elfcorehdr;
467			break;
468		}
469	}
470	/*
471	 * If we don't find it in the memory map, then we shouldn't
472	 * have to worry about it, as the new kernel won't use it.
473	 */
474	return 0;
475}
476early_param("elfcorehdr", early_parse_elfcorehdr);
477#endif
478
479#ifdef CONFIG_KEXEC
480static void __init mips_parse_crashkernel(void)
481{
482	unsigned long long total_mem;
483	unsigned long long crash_size, crash_base;
484	int ret;
485
486	total_mem = memblock_phys_mem_size();
487	ret = parse_crashkernel(boot_command_line, total_mem,
488				&crash_size, &crash_base);
489	if (ret != 0 || crash_size <= 0)
490		return;
491
492	if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
493		pr_warn("Invalid memory region reserved for crash kernel\n");
494		return;
495	}
496
497	crashk_res.start = crash_base;
498	crashk_res.end	 = crash_base + crash_size - 1;
499}
500
501static void __init request_crashkernel(struct resource *res)
502{
503	int ret;
504
505	if (crashk_res.start == crashk_res.end)
506		return;
507
508	ret = request_resource(res, &crashk_res);
509	if (!ret)
510		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
511			(unsigned long)(resource_size(&crashk_res) >> 20),
512			(unsigned long)(crashk_res.start  >> 20));
513}
514#else /* !defined(CONFIG_KEXEC)		*/
515static void __init mips_parse_crashkernel(void)
516{
517}
518
519static void __init request_crashkernel(struct resource *res)
520{
521}
522#endif /* !defined(CONFIG_KEXEC)  */
523
524static void __init check_kernel_sections_mem(void)
525{
526	phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
527	phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;
528
529	if (!memblock_is_region_memory(start, size)) {
530		pr_info("Kernel sections are not in the memory maps\n");
531		memblock_add(start, size);
532	}
533}
534
535static void __init bootcmdline_append(const char *s, size_t max)
536{
537	if (!s[0] || !max)
538		return;
539
540	if (boot_command_line[0])
541		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
542
543	strlcat(boot_command_line, s, max);
544}
545
546#ifdef CONFIG_OF_EARLY_FLATTREE
547
548static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
549					  int depth, void *data)
550{
551	bool *dt_bootargs = data;
552	const char *p;
553	int l;
554
555	if (depth != 1 || !data ||
556	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
557		return 0;
558
559	p = of_get_flat_dt_prop(node, "bootargs", &l);
560	if (p != NULL && l > 0) {
561		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
562		*dt_bootargs = true;
563	}
564
565	return 1;
566}
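/*
 * Example (illustrative) of the flattened device tree node matched by the
 * scan above:
 *
 *	chosen {
 *		bootargs = "console=ttyS0,115200 root=/dev/ram0";
 *	};
 *
 * The bootargs string is appended to boot_command_line and *dt_bootargs is
 * set so that bootcmdline_init() knows DT arguments were found.
 */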
567
568#endif /* CONFIG_OF_EARLY_FLATTREE */
569
570static void __init bootcmdline_init(void)
571{
572	bool dt_bootargs = false;
573
574	/*
575	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
576	 * trivial - we simply use the built-in command line unconditionally &
577	 * unmodified.
578	 */
579	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
580		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
581		return;
582	}
583
584	/*
585	 * If the user specified a built-in command line &
586	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
587	 * prepended to arguments from the bootloader or DT so we'll copy them
588	 * to the start of boot_command_line here. Otherwise, empty
589	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
590	 */
591	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
592		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
593	else
594		boot_command_line[0] = 0;
595
596#ifdef CONFIG_OF_EARLY_FLATTREE
597	/*
598	 * If we're configured to take boot arguments from DT, look for those
599	 * now.
600	 */
601	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
602	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
603		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
604#endif
605
606	/*
607	 * If we didn't get any arguments from DT (regardless of whether that's
608	 * because we weren't configured to look for them, or because we looked
609	 * & found none) then we'll take arguments from the bootloader.
610	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
611	 * the bootloader.
612	 */
613	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
614		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
615
616	/*
617	 * If the user specified a built-in command line & we didn't already
618	 * prepend it, we append it to boot_command_line here.
619	 */
620	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
621	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
622		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
623}
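/*
 * Worked example (illustrative) of the rules above, assuming a built-in
 * CONFIG_CMDLINE of "earlycon", DT bootargs of "console=ttyS0" and an
 * arcs_cmdline of "mem=128M":
 *
 *	CMDLINE_OVERRIDE			-> "earlycon"
 *	BUILTIN_EXTEND + CMDLINE_FROM_DTB	-> "earlycon console=ttyS0"
 *	MIPS_CMDLINE_FROM_DTB alone		-> "console=ttyS0"
 *	MIPS_CMDLINE_DTB_EXTEND			-> "console=ttyS0 mem=128M"
 *	MIPS_CMDLINE_FROM_BOOTLOADER		-> "mem=128M", with "earlycon"
 *						   appended if CMDLINE_BOOL
 */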
624
625/*
626 * arch_mem_init - initialize memory management subsystem
627 *
628 *  o plat_mem_setup() detects the memory configuration and will record detected
629 *    memory areas using add_memory_region.
630 *
631 * At this stage the memory configuration of the system is known to the
 632 * kernel, but the generic memory management system is still entirely uninitialized.
633 *
634 *  o bootmem_init()
635 *  o sparse_init()
636 *  o paging_init()
637 *  o dma_contiguous_reserve()
638 *
639 * At this stage the bootmem allocator is ready to use.
640 *
641 * NOTE: historically plat_mem_setup did the entire platform initialization.
642 *	 This was rather impractical because it meant plat_mem_setup had to
 643 * get by without any kind of memory allocator.  To keep old code from
 644 * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
645 * initialization hook for anything else was introduced.
646 */
647static void __init arch_mem_init(char **cmdline_p)
648{
649	/* call board setup routine */
650	plat_mem_setup();
651	memblock_set_bottom_up(true);
652
653	bootcmdline_init();
654	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
655	*cmdline_p = command_line;
656
657	parse_early_param();
658
659	if (usermem)
660		pr_info("User-defined physical RAM map overwrite\n");
661
662	check_kernel_sections_mem();
663
664	early_init_fdt_reserve_self();
665	early_init_fdt_scan_reserved_mem();
666
667#ifndef CONFIG_NUMA
668	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
669#endif
670	bootmem_init();
671
672	/*
673	 * Prevent memblock from allocating high memory.
674	 * This cannot be done before max_low_pfn is detected, so up
 675	 * to this point it is only possible to reserve physical memory
 676	 * with memblock_reserve; memblock_alloc* can be used
 677	 * only after this point.
678	 */
679	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
680
681#ifdef CONFIG_PROC_VMCORE
682	if (setup_elfcorehdr && setup_elfcorehdr_size) {
683		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
684		       setup_elfcorehdr, setup_elfcorehdr_size);
685		memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
686	}
687#endif
688
689	mips_parse_crashkernel();
690#ifdef CONFIG_KEXEC
691	if (crashk_res.start != crashk_res.end)
692		memblock_reserve(crashk_res.start, resource_size(&crashk_res));
693#endif
694	device_tree_init();
695
696	/*
 697	 * In order to reduce the possibility of a kernel panic when the
 698	 * IO TLB memory allocation fails under CONFIG_SWIOTLB, it is better
 699	 * to keep low-memory allocations as small as possible before
 700	 * plat_swiotlb_setup(), so make sparse_init() use top-down allocation.
701	 */
702	memblock_set_bottom_up(false);
703	sparse_init();
704	memblock_set_bottom_up(true);
705
706	plat_swiotlb_setup();
707
708	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
709
710	/* Reserve for hibernation. */
711	memblock_reserve(__pa_symbol(&__nosave_begin),
712		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
713
714	fdt_init_reserved_mem();
715
716	memblock_dump_all();
717
718	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
719}
720
721static void __init resource_init(void)
722{
723	struct memblock_region *region;
724
725	if (UNCAC_BASE != IO_BASE)
726		return;
727
728	code_resource.start = __pa_symbol(&_text);
729	code_resource.end = __pa_symbol(&_etext) - 1;
730	data_resource.start = __pa_symbol(&_etext);
731	data_resource.end = __pa_symbol(&_edata) - 1;
732	bss_resource.start = __pa_symbol(&__bss_start);
733	bss_resource.end = __pa_symbol(&__bss_stop) - 1;
734
735	for_each_memblock(memory, region) {
736		phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
737		phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
738		struct resource *res;
739
740		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
741		if (!res)
742			panic("%s: Failed to allocate %zu bytes\n", __func__,
743			      sizeof(struct resource));
744
745		res->start = start;
746		res->end = end;
747		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
748		res->name = "System RAM";
749
750		request_resource(&iomem_resource, res);
751
752		/*
753		 *  We don't know which RAM region contains kernel data,
754		 *  so we try it repeatedly and let the resource manager
755		 *  test it.
756		 */
757		request_resource(res, &code_resource);
758		request_resource(res, &data_resource);
759		request_resource(res, &bss_resource);
760		request_crashkernel(res);
761	}
762}
763
764#ifdef CONFIG_SMP
765static void __init prefill_possible_map(void)
766{
767	int i, possible = num_possible_cpus();
768
769	if (possible > nr_cpu_ids)
770		possible = nr_cpu_ids;
771
772	for (i = 0; i < possible; i++)
773		set_cpu_possible(i, true);
774	for (; i < NR_CPUS; i++)
775		set_cpu_possible(i, false);
776
777	nr_cpu_ids = possible;
778}
779#else
780static inline void prefill_possible_map(void) {}
781#endif
782
783void __init setup_arch(char **cmdline_p)
784{
785	cpu_probe();
786	mips_cm_probe();
787	prom_init();
788
789	setup_early_fdc_console();
790#ifdef CONFIG_EARLY_PRINTK
791	setup_early_printk();
792#endif
793	cpu_report();
794	check_bugs_early();
795
796#if defined(CONFIG_VT)
797#if defined(CONFIG_VGA_CONSOLE)
798	conswitchp = &vga_con;
799#endif
800#endif
801
802	arch_mem_init(cmdline_p);
803	dmi_setup();
804
805	resource_init();
806	plat_smp_setup();
807	prefill_possible_map();
808
809	cpu_cache_init();
810	paging_init();
811}
812
813unsigned long kernelsp[NR_CPUS];
814unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
815
816#ifdef CONFIG_USE_OF
817unsigned long fw_passed_dtb;
818#endif
819
820#ifdef CONFIG_DEBUG_FS
821struct dentry *mips_debugfs_dir;
822static int __init debugfs_mips(void)
823{
824	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
825	return 0;
826}
827arch_initcall(debugfs_mips);
828#endif
829
830#ifdef CONFIG_DMA_MAYBE_COHERENT
831/* User defined DMA coherency from command line. */
832enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
833EXPORT_SYMBOL_GPL(coherentio);
834int hw_coherentio;	/* Actual hardware supported DMA coherency setting. */
835
836static int __init setcoherentio(char *str)
837{
838	coherentio = IO_COHERENCE_ENABLED;
839	pr_info("Hardware DMA cache coherency (command line)\n");
840	return 0;
841}
842early_param("coherentio", setcoherentio);
843
844static int __init setnocoherentio(char *str)
845{
846	coherentio = IO_COHERENCE_DISABLED;
847	pr_info("Software DMA cache coherency (command line)\n");
848	return 0;
849}
850early_param("nocoherentio", setnocoherentio);
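/*
 * Example (illustrative): on CONFIG_DMA_MAYBE_COHERENT platforms, booting
 * with "coherentio" forces hardware DMA coherency on and "nocoherentio"
 * forces software cache maintenance; with neither, coherentio stays at
 * IO_COHERENCE_DEFAULT and platform code falls back to the detected
 * hw_coherentio value.
 */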
851#endif