v6.2
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1995 Linus Torvalds
  7 * Copyright (C) 1995 Waldorf Electronics
  8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
  9 * Copyright (C) 1996 Stoned Elipot
 10 * Copyright (C) 1999 Silicon Graphics, Inc.
 11 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 12 */
 13#include <linux/init.h>
 14#include <linux/ioport.h>
 15#include <linux/export.h>
 16#include <linux/screen_info.h>
 17#include <linux/memblock.h>
 18#include <linux/initrd.h>
 19#include <linux/root_dev.h>
 20#include <linux/highmem.h>
 21#include <linux/console.h>
 22#include <linux/pfn.h>
 23#include <linux/debugfs.h>
 24#include <linux/kexec.h>
 25#include <linux/sizes.h>
 26#include <linux/device.h>
 27#include <linux/dma-map-ops.h>
 28#include <linux/decompress/generic.h>
 29#include <linux/of_fdt.h>
 30#include <linux/dmi.h>
 31#include <linux/crash_dump.h>
 32
 33#include <asm/addrspace.h>
 34#include <asm/bootinfo.h>
 35#include <asm/bugs.h>
 36#include <asm/cache.h>
 37#include <asm/cdmm.h>
 38#include <asm/cpu.h>
 39#include <asm/debug.h>
 40#include <asm/mmzone.h>
 41#include <asm/sections.h>
 42#include <asm/setup.h>
 43#include <asm/smp-ops.h>
 44#include <asm/prom.h>
 45#include <asm/fw/fw.h>
 46
 47#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
 48char __section(".appended_dtb") __appended_dtb[0x100000];
 49#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 50
 51struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
 52
 53EXPORT_SYMBOL(cpu_data);
 54
 55#ifdef CONFIG_VT
 56struct screen_info screen_info;
 57#endif
 58
 59/*
 60 * Setup information
 61 *
 62 * These are initialized so they are in the .data section
 63 */
 64unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
 65
 66EXPORT_SYMBOL(mips_machtype);
 67
 68static char __initdata command_line[COMMAND_LINE_SIZE];
 69char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
 70
 71#ifdef CONFIG_CMDLINE_BOOL
 72static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
 73#else
 74static const char builtin_cmdline[] __initconst = "";
 75#endif
 76
 77/*
 78 * mips_io_port_base is the beginning of the address space to which x86-style
 79 * I/O ports are mapped.
 80 */
 81unsigned long mips_io_port_base = -1;
 82EXPORT_SYMBOL(mips_io_port_base);
 83
 84static struct resource code_resource = { .name = "Kernel code", };
 85static struct resource data_resource = { .name = "Kernel data", };
 86static struct resource bss_resource = { .name = "Kernel bss", };
 87
 88unsigned long __kaslr_offset __ro_after_init;
 89EXPORT_SYMBOL(__kaslr_offset);
 90
 91static void *detect_magic __initdata = detect_memory_region;
 92
 93#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
 94unsigned long ARCH_PFN_OFFSET;
 95EXPORT_SYMBOL(ARCH_PFN_OFFSET);
 96#endif
 97
 98void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
 99{
100	void *dm = &detect_magic;
101	phys_addr_t size;
102
103	for (size = sz_min; size < sz_max; size <<= 1) {
104		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
105			break;
106	}
107
108	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
109		((unsigned long long) size) / SZ_1M,
110		(unsigned long long) start,
111		((unsigned long long) sz_min) / SZ_1M,
112		((unsigned long long) sz_max) / SZ_1M);
113
114	memblock_add(start, size);
115}
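/*
 * A minimal usage sketch (hypothetical board and sizes): a platform
 * whose RAM starts at physical 0 and is somewhere between 32 MB and
 * 256 MB can let the kernel probe the actual size from its board
 * setup hook:
 *
 *	void __init plat_mem_setup(void)
 *	{
 *		detect_memory_region(0, SZ_32M, SZ_256M);
 *	}
 *
 * The probe relies on partial address decoding: past the end of real
 * RAM the memory aliases, so the detect_magic value shows up again at
 * dm + size, and the loop stops at the first size where that happens.
 */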
116
117/*
118 * Manage initrd
119 */
120#ifdef CONFIG_BLK_DEV_INITRD
121
122static int __init rd_start_early(char *p)
123{
124	unsigned long start = memparse(p, &p);
125
126#ifdef CONFIG_64BIT
127	/* Guess if the sign extension was forgotten by bootloader */
128	if (start < XKPHYS)
129		start = (int)start;
130#endif
131	initrd_start = start;
132	initrd_end += start;
133	return 0;
134}
135early_param("rd_start", rd_start_early);
136
137static int __init rd_size_early(char *p)
138{
139	initrd_end += memparse(p, &p);
140	return 0;
141}
142early_param("rd_size", rd_size_early);
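/*
 * A usage sketch for the two parameters above (made-up values): a
 * bootloader that loads an 8 MB initrd at KSEG0 address 0x82000000
 * would pass
 *
 *	rd_start=0x82000000 rd_size=8M
 *
 * Both handlers use memparse(), so the usual K/M/G suffixes work, and
 * because initrd_end simply accumulates start + size, the two options
 * can appear in either order.
 */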
143
 144/* Returns the next free pfn after the initrd. */
145static unsigned long __init init_initrd(void)
146{
147	unsigned long end;
148
149	/*
 150	 * Board-specific code or the command line parser should have
 151	 * already set up initrd_start and initrd_end. In that case,
 152	 * perform sanity checks and use them if all looks good.
153	 */
154	if (!initrd_start || initrd_end <= initrd_start)
155		goto disable;
156
157	if (initrd_start & ~PAGE_MASK) {
158		pr_err("initrd start must be page aligned\n");
159		goto disable;
160	}
161	if (initrd_start < PAGE_OFFSET) {
162		pr_err("initrd start < PAGE_OFFSET\n");
163		goto disable;
164	}
165
166	/*
 167	 * Sanitize initrd addresses. For example, firmware
 168	 * can't guess whether it needs to pass them as
 169	 * 64-bit values if the kernel has been built as pure
 170	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
 171	 * addresses now, so that the code can safely use __pa().
172	 */
173	end = __pa(initrd_end);
174	initrd_end = (unsigned long)__va(end);
175	initrd_start = (unsigned long)__va(__pa(initrd_start));
176
177	ROOT_DEV = Root_RAM0;
178	return PFN_UP(end);
179disable:
180	initrd_start = 0;
181	initrd_end = 0;
182	return 0;
183}
184
 185/* In some conditions (e.g. a big-endian bootloader with a little-endian
 186   kernel), the initrd might appear byte-swapped.  Try to detect this and
 187   byte-swap it if needed.  */
188static void __init maybe_bswap_initrd(void)
189{
190#if defined(CONFIG_CPU_CAVIUM_OCTEON)
191	u64 buf;
192
193	/* Check for CPIO signature */
194	if (!memcmp((void *)initrd_start, "070701", 6))
195		return;
196
197	/* Check for compressed initrd */
198	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
199		return;
200
201	/* Try again with a byte swapped header */
202	buf = swab64p((u64 *)initrd_start);
203	if (!memcmp(&buf, "070701", 6) ||
204	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
205		unsigned long i;
206
207		pr_info("Byteswapped initrd detected\n");
208		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
209			swab64s((u64 *)i);
210	}
211#endif
212}
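/*
 * A worked example of the detection above: a newc cpio archive begins
 * with the ASCII bytes '0' '7' '0' '7' '0' '1'. If the bootloader
 * copied the image with the wrong 64-bit endianness, those bytes sit
 * reversed within the first doubleword, so the direct memcmp() and
 * decompress_method() probes fail; swab64p() on that doubleword
 * restores "070701" (or a recognizable compression header), and the
 * whole image is then swapped back in place, 8 bytes at a time.
 */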
213
214static void __init finalize_initrd(void)
215{
216	unsigned long size = initrd_end - initrd_start;
217
218	if (size == 0) {
219		printk(KERN_INFO "Initrd not found or empty");
220		goto disable;
221	}
222	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
223		printk(KERN_ERR "Initrd extends beyond end of memory");
224		goto disable;
225	}
226
227	maybe_bswap_initrd();
228
229	memblock_reserve(__pa(initrd_start), size);
230	initrd_below_start_ok = 1;
231
232	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
233		initrd_start, size);
234	return;
235disable:
236	printk(KERN_CONT " - disabling initrd\n");
237	initrd_start = 0;
238	initrd_end = 0;
239}
240
241#else  /* !CONFIG_BLK_DEV_INITRD */
242
243static unsigned long __init init_initrd(void)
244{
245	return 0;
246}
247
248#define finalize_initrd()	do {} while (0)
249
250#endif
251
252/*
 253 * Initialize the bootmem allocator. It also sets up initrd-related
 254 * data if needed.
255 */
256#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
257
258static void __init bootmem_init(void)
259{
260	init_initrd();
261	finalize_initrd();
262}
263
264#else  /* !CONFIG_SGI_IP27 */
265
266static void __init bootmem_init(void)
267{
268	phys_addr_t ramstart, ramend;
269	unsigned long start, end;
270	int i;
271
272	ramstart = memblock_start_of_DRAM();
273	ramend = memblock_end_of_DRAM();
274
275	/*
276	 * Sanity check any INITRD first. We don't take it into account
277	 * for bootmem setup initially, rely on the end-of-kernel-code
278	 * as our memory range starting point. Once bootmem is inited we
279	 * will reserve the area used for the initrd.
280	 */
281	init_initrd();
282
283	/* Reserve memory occupied by kernel. */
284	memblock_reserve(__pa_symbol(&_text),
285			__pa_symbol(&_end) - __pa_symbol(&_text));
286
287	/* max_low_pfn is not a number of pages but the end pfn of low mem */
288
289#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
290	ARCH_PFN_OFFSET = PFN_UP(ramstart);
291#else
292	/*
293	 * Reserve any memory between the start of RAM and PHYS_OFFSET
294	 */
295	if (ramstart > PHYS_OFFSET)
296		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
297
298	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
299		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
300			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
301			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
302	}
303#endif
304
305	min_low_pfn = ARCH_PFN_OFFSET;
306	max_pfn = PFN_DOWN(ramend);
307	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
 308		/*
 309		 * Skip highmem here so we get an accurate max_low_pfn if low
 310		 * memory stops short of high memory.
 311		 * If the region overlaps HIGHMEM_START, end is clipped so
 312		 * max_low_pfn excludes the highmem portion.
 313		 */
314		if (start >= PFN_DOWN(HIGHMEM_START))
315			continue;
316		if (end > PFN_DOWN(HIGHMEM_START))
317			end = PFN_DOWN(HIGHMEM_START);
318		if (end > max_low_pfn)
319			max_low_pfn = end;
320	}
321
322	if (min_low_pfn >= max_low_pfn)
323		panic("Incorrect memory mapping !!!");
324
325	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
326#ifdef CONFIG_HIGHMEM
327		highstart_pfn = PFN_DOWN(HIGHMEM_START);
328		highend_pfn = max_pfn;
329#else
330		max_low_pfn = PFN_DOWN(HIGHMEM_START);
331		max_pfn = max_low_pfn;
332#endif
333	}
334
335	/*
336	 * Reserve initrd memory if needed.
337	 */
338	finalize_initrd();
339}
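/*
 * A worked example of the highmem clipping in bootmem_init() above,
 * assuming the common MIPS32 HIGHMEM_START of 0x20000000 (512 MB) and
 * 4 KB pages: with 1 GB of RAM at physical 0, max_pfn starts out as
 * PFN_DOWN(0x40000000) = 0x40000, the loop clips max_low_pfn to
 * PFN_DOWN(0x20000000) = 0x20000, and with CONFIG_HIGHMEM the pfn
 * range [0x20000, 0x40000) becomes highmem via highstart_pfn and
 * highend_pfn; without CONFIG_HIGHMEM both max_low_pfn and max_pfn
 * are pulled back to 0x20000 and the upper 512 MB goes unused.
 */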
340
341#endif	/* CONFIG_SGI_IP27 */
342
343static int usermem __initdata;
344
345static int __init early_parse_mem(char *p)
346{
347	phys_addr_t start, size;
348
349	if (!p) {
350		pr_err("mem parameter is empty, do nothing\n");
351		return -EINVAL;
352	}
353
354	/*
355	 * If a user specifies memory size, we
356	 * blow away any automatically generated
357	 * size.
358	 */
359	if (usermem == 0) {
360		usermem = 1;
361		memblock_remove(memblock_start_of_DRAM(),
362			memblock_end_of_DRAM() - memblock_start_of_DRAM());
363	}
364	start = 0;
365	size = memparse(p, &p);
366	if (*p == '@')
367		start = memparse(p + 1, &p);
368
369	if (IS_ENABLED(CONFIG_NUMA))
370		memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
371	else
372		memblock_add(start, size);
373
374	return 0;
375}
376early_param("mem", early_parse_mem);
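/*
 * A usage sketch for "mem=" (example values): the first occurrence
 * wipes the firmware-provided map, so several options can rebuild it
 * piece by piece, e.g.
 *
 *	mem=256M@0x00000000 mem=256M@0x90000000
 *
 * The syntax is size[KMG][@start[KMG]]; with no @start the region is
 * added at physical address 0.
 */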
377
378static int __init early_parse_memmap(char *p)
379{
380	char *oldp;
381	u64 start_at, mem_size;
382
383	if (!p)
384		return -EINVAL;
385
386	if (!strncmp(p, "exactmap", 8)) {
387		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
388		return 0;
389	}
390
391	oldp = p;
392	mem_size = memparse(p, &p);
393	if (p == oldp)
394		return -EINVAL;
395
396	if (*p == '@') {
397		start_at = memparse(p+1, &p);
398		memblock_add(start_at, mem_size);
399	} else if (*p == '#') {
400		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
401		return -EINVAL;
402	} else if (*p == '$') {
403		start_at = memparse(p+1, &p);
404		memblock_add(start_at, mem_size);
405		memblock_reserve(start_at, mem_size);
406	} else {
407		pr_err("\"memmap\" invalid format!\n");
408		return -EINVAL;
409	}
410
411	if (*p == '\0') {
412		usermem = 1;
413		return 0;
414	} else
415		return -EINVAL;
416}
417early_param("memmap", early_parse_memmap);
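/*
 * A usage sketch for "memmap=" (example values): nn@ss adds usable
 * RAM, while nn$ss adds the range but immediately reserves it, e.g.
 * to keep the allocator away from a firmware-owned buffer:
 *
 *	memmap=4M$0x1f000000
 *
 * The '$' usually needs escaping in bootloader environments, and any
 * trailing characters left after the parsed region make the option
 * fail with -EINVAL.
 */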
418
419static void __init mips_reserve_vmcore(void)
420{
421#ifdef CONFIG_PROC_VMCORE
422	phys_addr_t start, end;
423	u64 i;
424
425	if (!elfcorehdr_size) {
426		for_each_mem_range(i, &start, &end) {
427			if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
428				/*
429				 * Reserve from the elf core header to the end of
430				 * the memory segment, that should all be kdump
431				 * reserved memory.
432				 */
433				elfcorehdr_size = end - elfcorehdr_addr;
434				break;
435			}
436		}
437	}
438
439	pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
440		(unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);
441
442	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
443#endif
444}
445
446#ifdef CONFIG_KEXEC
447
448/* 64M alignment for crash kernel regions */
449#define CRASH_ALIGN	SZ_64M
450#define CRASH_ADDR_MAX	SZ_512M
451
452static void __init mips_parse_crashkernel(void)
453{
454	unsigned long long total_mem;
455	unsigned long long crash_size, crash_base;
456	int ret;
457
458	total_mem = memblock_phys_mem_size();
459	ret = parse_crashkernel(boot_command_line, total_mem,
460				&crash_size, &crash_base);
461	if (ret != 0 || crash_size <= 0)
462		return;
463
464	if (crash_base <= 0) {
465		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
466						       CRASH_ALIGN,
467						       CRASH_ADDR_MAX);
468		if (!crash_base) {
469			pr_warn("crashkernel reservation failed - No suitable area found.\n");
470			return;
471		}
472	} else {
473		unsigned long long start;
474
475		start = memblock_phys_alloc_range(crash_size, 1,
476						  crash_base,
477						  crash_base + crash_size);
478		if (start != crash_base) {
479			pr_warn("Invalid memory region reserved for crash kernel\n");
480			return;
481		}
482	}
483
484	crashk_res.start = crash_base;
485	crashk_res.end	 = crash_base + crash_size - 1;
486}
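/*
 * A usage sketch for the crashkernel reservation above (example
 * values), using the size[@offset] form handled by parse_crashkernel():
 *
 *	crashkernel=128M	the kernel picks a base below
 *				CRASH_ADDR_MAX at CRASH_ALIGN alignment
 *	crashkernel=128M@64M	the reservation must succeed at exactly
 *				64 MB or it is dropped with a warning
 */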
487
488static void __init request_crashkernel(struct resource *res)
489{
490	int ret;
491
492	if (crashk_res.start == crashk_res.end)
493		return;
494
495	ret = request_resource(res, &crashk_res);
496	if (!ret)
497		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
498			(unsigned long)(resource_size(&crashk_res) >> 20),
499			(unsigned long)(crashk_res.start  >> 20));
500}
501#else /* !defined(CONFIG_KEXEC)		*/
502static void __init mips_parse_crashkernel(void)
503{
504}
505
506static void __init request_crashkernel(struct resource *res)
507{
508}
509#endif /* !defined(CONFIG_KEXEC)  */
510
511static void __init check_kernel_sections_mem(void)
512{
513	phys_addr_t start = __pa_symbol(&_text);
514	phys_addr_t size = __pa_symbol(&_end) - start;
515
516	if (!memblock_is_region_memory(start, size)) {
517		pr_info("Kernel sections are not in the memory maps\n");
518		memblock_add(start, size);
519	}
520}
521
522static void __init bootcmdline_append(const char *s, size_t max)
523{
524	if (!s[0] || !max)
525		return;
526
527	if (boot_command_line[0])
528		strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
529
530	strlcat(boot_command_line, s, max);
531}
532
533#ifdef CONFIG_OF_EARLY_FLATTREE
534
535static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
536					  int depth, void *data)
537{
538	bool *dt_bootargs = data;
539	const char *p;
540	int l;
541
542	if (depth != 1 || !data ||
543	    (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
544		return 0;
545
546	p = of_get_flat_dt_prop(node, "bootargs", &l);
547	if (p != NULL && l > 0) {
548		bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
549		*dt_bootargs = true;
550	}
551
552	return 1;
553}
554
555#endif /* CONFIG_OF_EARLY_FLATTREE */
556
557static void __init bootcmdline_init(void)
558{
559	bool dt_bootargs = false;
560
561	/*
562	 * If CMDLINE_OVERRIDE is enabled then initializing the command line is
563	 * trivial - we simply use the built-in command line unconditionally &
564	 * unmodified.
565	 */
566	if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
567		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
568		return;
569	}
570
571	/*
572	 * If the user specified a built-in command line &
573	 * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
574	 * prepended to arguments from the bootloader or DT so we'll copy them
575	 * to the start of boot_command_line here. Otherwise, empty
576	 * boot_command_line to undo anything early_init_dt_scan_chosen() did.
577	 */
578	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
579		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
580	else
581		boot_command_line[0] = 0;
582
583#ifdef CONFIG_OF_EARLY_FLATTREE
584	/*
585	 * If we're configured to take boot arguments from DT, look for those
586	 * now.
587	 */
588	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
589	    IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
590		of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
591#endif
592
593	/*
594	 * If we didn't get any arguments from DT (regardless of whether that's
595	 * because we weren't configured to look for them, or because we looked
596	 * & found none) then we'll take arguments from the bootloader.
597	 * plat_mem_setup() should have filled arcs_cmdline with arguments from
598	 * the bootloader.
599	 */
600	if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
601		bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
602
603	/*
604	 * If the user specified a built-in command line & we didn't already
605	 * prepend it, we append it to boot_command_line here.
606	 */
607	if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
608	    !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
609		bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
610}
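/*
 * The resulting precedence, summarized from the logic above:
 *
 *  - CMDLINE_OVERRIDE: the built-in command line is used alone.
 *  - MIPS_CMDLINE_BUILTIN_EXTEND: built-in first, then DT and/or
 *    bootloader arguments are appended.
 *  - MIPS_CMDLINE_FROM_DTB / MIPS_CMDLINE_DTB_EXTEND: DT bootargs are
 *    used, with bootloader arguments appended when extending or when
 *    the DT supplied none.
 *  - Otherwise: bootloader arguments, with the built-in command line
 *    appended if one was configured.
 */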
611
612/*
613 * arch_mem_init - initialize memory management subsystem
614 *
615 *  o plat_mem_setup() detects the memory configuration and will record detected
616 *    memory areas using memblock_add.
617 *
618 * At this stage the memory configuration of the system is known to the
 619 * kernel, but the generic memory management system is still entirely uninitialized.
620 *
621 *  o bootmem_init()
622 *  o sparse_init()
623 *  o paging_init()
624 *  o dma_contiguous_reserve()
625 *
626 * At this stage the bootmem allocator is ready to use.
627 *
628 * NOTE: historically plat_mem_setup did the entire platform initialization.
629 *	 This was rather impractical because it meant plat_mem_setup had to
 630 * get by without any kind of memory allocator.  To keep old code from
 631 * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
632 * initialization hook for anything else was introduced.
633 */
634static void __init arch_mem_init(char **cmdline_p)
635{
636	/* call board setup routine */
637	plat_mem_setup();
638	memblock_set_bottom_up(true);
639
640	bootcmdline_init();
641	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
642	*cmdline_p = command_line;
643
644	parse_early_param();
645
646	if (usermem)
647		pr_info("User-defined physical RAM map overwrite\n");
648
649	check_kernel_sections_mem();
650
651	early_init_fdt_reserve_self();
652	early_init_fdt_scan_reserved_mem();
653
654#ifndef CONFIG_NUMA
655	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
656#endif
657	bootmem_init();
658
659	/*
660	 * Prevent memblock from allocating high memory.
 661	 * This cannot be done before max_low_pfn is detected, so up
 662	 * to this point it is only possible to reserve physical memory
 663	 * with memblock_reserve; memblock_alloc* can be used
 664	 * only after this point.
665	 */
666	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
667
668	mips_reserve_vmcore();
669
670	mips_parse_crashkernel();
671	device_tree_init();
672
673	/*
 674	 * In order to reduce the possibility of a kernel panic when we fail to
 675	 * get IO TLB memory under CONFIG_SWIOTLB, it is better to consume as
 676	 * little low memory as possible before plat_swiotlb_setup(), so
 677	 * make sparse_init() use top-down allocation.
678	 */
679	memblock_set_bottom_up(false);
680	sparse_init();
681	memblock_set_bottom_up(true);
682
683	plat_swiotlb_setup();
684
685	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
686
687	/* Reserve for hibernation. */
688	memblock_reserve(__pa_symbol(&__nosave_begin),
689		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
690
691	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
692}
693
694static void __init resource_init(void)
695{
696	phys_addr_t start, end;
697	u64 i;
698
699	if (UNCAC_BASE != IO_BASE)
700		return;
701
702	code_resource.start = __pa_symbol(&_text);
703	code_resource.end = __pa_symbol(&_etext) - 1;
704	data_resource.start = __pa_symbol(&_etext);
705	data_resource.end = __pa_symbol(&_edata) - 1;
706	bss_resource.start = __pa_symbol(&__bss_start);
707	bss_resource.end = __pa_symbol(&__bss_stop) - 1;
708
709	for_each_mem_range(i, &start, &end) {
710		struct resource *res;
711
712		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
713		if (!res)
714			panic("%s: Failed to allocate %zu bytes\n", __func__,
715			      sizeof(struct resource));
716
717		res->start = start;
718		/*
719		 * In memblock, end points to the first byte after the
 720		 * range, while in resources, end points to the last byte in
721		 * the range.
722		 */
723		res->end = end - 1;
724		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
725		res->name = "System RAM";
726
727		request_resource(&iomem_resource, res);
728
729		/*
730		 *  We don't know which RAM region contains kernel data,
731		 *  so we try it repeatedly and let the resource manager
732		 *  test it.
733		 */
734		request_resource(res, &code_resource);
735		request_resource(res, &data_resource);
736		request_resource(res, &bss_resource);
737		request_crashkernel(res);
738	}
739}
740
741#ifdef CONFIG_SMP
742static void __init prefill_possible_map(void)
743{
744	int i, possible = num_possible_cpus();
745
746	if (possible > nr_cpu_ids)
747		possible = nr_cpu_ids;
748
749	for (i = 0; i < possible; i++)
750		set_cpu_possible(i, true);
751	for (; i < NR_CPUS; i++)
752		set_cpu_possible(i, false);
753
754	set_nr_cpu_ids(possible);
755}
756#else
757static inline void prefill_possible_map(void) {}
758#endif
759
760static void __init setup_rng_seed(void)
761{
762	char *rng_seed_hex = fw_getenv("rngseed");
763	u8 rng_seed[512];
764	size_t len;
765
766	if (!rng_seed_hex)
767		return;
768
769	len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
770	if (hex2bin(rng_seed, rng_seed_hex, len))
771		return;
772
773	add_bootloader_randomness(rng_seed, len);
774	memzero_explicit(rng_seed, len);
775	memzero_explicit(rng_seed_hex, len * 2);
776}
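/*
 * A usage sketch (made-up, truncated value): the firmware environment
 * variable is expected to hold plain hex digits, e.g.
 *
 *	rngseed=6db1f06509b2a3c8...
 *
 * Up to 512 bytes (1024 hex digits) are decoded, credited to the RNG
 * via add_bootloader_randomness(), and then both the binary and hex
 * copies are wiped.
 */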
777
778void __init setup_arch(char **cmdline_p)
779{
780	cpu_probe();
781	mips_cm_probe();
782	prom_init();
783
784	setup_early_fdc_console();
785#ifdef CONFIG_EARLY_PRINTK
786	setup_early_printk();
787#endif
788	cpu_report();
789	check_bugs_early();
790
791#if defined(CONFIG_VT)
792#if defined(CONFIG_VGA_CONSOLE)
793	conswitchp = &vga_con;
794#endif
795#endif
796
797	arch_mem_init(cmdline_p);
798	dmi_setup();
799
800	resource_init();
801	plat_smp_setup();
802	prefill_possible_map();
803
804	cpu_cache_init();
805	paging_init();
806
807	memblock_dump_all();
808
809	setup_rng_seed();
810}
811
812unsigned long kernelsp[NR_CPUS];
813unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
814
815#ifdef CONFIG_DEBUG_FS
816struct dentry *mips_debugfs_dir;
817static int __init debugfs_mips(void)
818{
819	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
820	return 0;
821}
822arch_initcall(debugfs_mips);
823#endif
824
825#ifdef CONFIG_DMA_NONCOHERENT
826static int __init setcoherentio(char *str)
827{
828	dma_default_coherent = true;
829	pr_info("Hardware DMA cache coherency (command line)\n");
830	return 0;
831}
832early_param("coherentio", setcoherentio);
833
834static int __init setnocoherentio(char *str)
835{
836	dma_default_coherent = false;
837	pr_info("Software DMA cache coherency (command line)\n");
838	return 0;
839}
840early_param("nocoherentio", setnocoherentio);
841#endif
v5.4
  1/*
  2 * This file is subject to the terms and conditions of the GNU General Public
  3 * License.  See the file "COPYING" in the main directory of this archive
  4 * for more details.
  5 *
  6 * Copyright (C) 1995 Linus Torvalds
  7 * Copyright (C) 1995 Waldorf Electronics
  8 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
  9 * Copyright (C) 1996 Stoned Elipot
 10 * Copyright (C) 1999 Silicon Graphics, Inc.
 11 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 12 */
 13#include <linux/init.h>
 14#include <linux/ioport.h>
 15#include <linux/export.h>
 16#include <linux/screen_info.h>
 17#include <linux/memblock.h>
 18#include <linux/initrd.h>
 19#include <linux/root_dev.h>
 20#include <linux/highmem.h>
 21#include <linux/console.h>
 22#include <linux/pfn.h>
 23#include <linux/debugfs.h>
 24#include <linux/kexec.h>
 25#include <linux/sizes.h>
 26#include <linux/device.h>
 27#include <linux/dma-contiguous.h>
 28#include <linux/decompress/generic.h>
 29#include <linux/of_fdt.h>
 30#include <linux/of_reserved_mem.h>
 31
 32#include <asm/addrspace.h>
 33#include <asm/bootinfo.h>
 34#include <asm/bugs.h>
 35#include <asm/cache.h>
 36#include <asm/cdmm.h>
 37#include <asm/cpu.h>
 38#include <asm/debug.h>
 39#include <asm/dma-coherence.h>
 40#include <asm/sections.h>
 41#include <asm/setup.h>
 42#include <asm/smp-ops.h>
 43#include <asm/prom.h>
 44
 45#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
 46const char __section(.appended_dtb) __appended_dtb[0x100000];
 47#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
 48
 49struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
 50
 51EXPORT_SYMBOL(cpu_data);
 52
 53#ifdef CONFIG_VT
 54struct screen_info screen_info;
 55#endif
 56
 57/*
 58 * Setup information
 59 *
 60 * These are initialized so they are in the .data section
 61 */
 62unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
 63
 64EXPORT_SYMBOL(mips_machtype);
 65
 66static char __initdata command_line[COMMAND_LINE_SIZE];
 67char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
 68
 69#ifdef CONFIG_CMDLINE_BOOL
 70static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
 71#endif
 72
 73/*
 74 * mips_io_port_base is the beginning of the address space to which x86-style
 75 * I/O ports are mapped.
 76 */
 77unsigned long mips_io_port_base = -1;
 78EXPORT_SYMBOL(mips_io_port_base);
 79
 80static struct resource code_resource = { .name = "Kernel code", };
 81static struct resource data_resource = { .name = "Kernel data", };
 82static struct resource bss_resource = { .name = "Kernel bss", };
 83
 84static void *detect_magic __initdata = detect_memory_region;
 85
 86#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
 87unsigned long ARCH_PFN_OFFSET;
 88EXPORT_SYMBOL(ARCH_PFN_OFFSET);
 89#endif
 90
 91void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
 92{
 93	/*
  94	 * Note: This function only exists for historical reasons;
  95	 * new code should use memblock_add or memblock_add_node instead.
 96	 */
 97
 98	/*
 99	 * If the region reaches the top of the physical address space, adjust
100	 * the size slightly so that (start + size) doesn't overflow
101	 */
102	if (start + size - 1 == PHYS_ADDR_MAX)
103		--size;
104
105	/* Sanity check */
106	if (start + size < start) {
107		pr_warn("Trying to add an invalid memory region, skipped\n");
108		return;
109	}
110
111	if (start < PHYS_OFFSET)
112		return;
113
114	memblock_add(start, size);
115	/* Reserve any memory except the ordinary RAM ranges. */
116	switch (type) {
117	case BOOT_MEM_RAM:
118		break;
119
120	case BOOT_MEM_NOMAP: /* Discard the range from the system. */
121		memblock_remove(start, size);
122		break;
123
124	default: /* Reserve the rest of the memory types at boot time */
125		memblock_reserve(start, size);
126		break;
127	}
128}
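/*
 * A usage sketch (example values) for this v5.4-era interface: board
 * code registered each detected range with an explicit type, e.g.
 *
 *	add_memory_region(0x00000000, SZ_128M, BOOT_MEM_RAM);
 *	add_memory_region(0x0ff00000, SZ_1M, BOOT_MEM_RESERVED);
 *
 * BOOT_MEM_RAM becomes a plain memblock_add(), BOOT_MEM_NOMAP is
 * removed from the system again, and any other type stays added but
 * reserved.
 */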
129
130void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
131{
132	void *dm = &detect_magic;
133	phys_addr_t size;
134
135	for (size = sz_min; size < sz_max; size <<= 1) {
136		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
137			break;
138	}
139
140	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
141		((unsigned long long) size) / SZ_1M,
142		(unsigned long long) start,
143		((unsigned long long) sz_min) / SZ_1M,
144		((unsigned long long) sz_max) / SZ_1M);
145
146	add_memory_region(start, size, BOOT_MEM_RAM);
147}
148
149/*
150 * Manage initrd
151 */
152#ifdef CONFIG_BLK_DEV_INITRD
153
154static int __init rd_start_early(char *p)
155{
156	unsigned long start = memparse(p, &p);
157
158#ifdef CONFIG_64BIT
159	/* Guess if the sign extension was forgotten by bootloader */
160	if (start < XKPHYS)
161		start = (int)start;
162#endif
163	initrd_start = start;
164	initrd_end += start;
165	return 0;
166}
167early_param("rd_start", rd_start_early);
168
169static int __init rd_size_early(char *p)
170{
171	initrd_end += memparse(p, &p);
172	return 0;
173}
174early_param("rd_size", rd_size_early);
175
 176/* Returns the next free pfn after the initrd. */
177static unsigned long __init init_initrd(void)
178{
179	unsigned long end;
180
181	/*
 182	 * Board-specific code or the command line parser should have
 183	 * already set up initrd_start and initrd_end. In that case,
 184	 * perform sanity checks and use them if all looks good.
185	 */
186	if (!initrd_start || initrd_end <= initrd_start)
187		goto disable;
188
189	if (initrd_start & ~PAGE_MASK) {
190		pr_err("initrd start must be page aligned\n");
191		goto disable;
192	}
193	if (initrd_start < PAGE_OFFSET) {
194		pr_err("initrd start < PAGE_OFFSET\n");
195		goto disable;
196	}
197
198	/*
 199	 * Sanitize initrd addresses. For example, firmware
 200	 * can't guess whether it needs to pass them as
 201	 * 64-bit values if the kernel has been built as pure
 202	 * 32-bit. We also need to switch from KSEG0 to XKPHYS
 203	 * addresses now, so that the code can safely use __pa().
204	 */
205	end = __pa(initrd_end);
206	initrd_end = (unsigned long)__va(end);
207	initrd_start = (unsigned long)__va(__pa(initrd_start));
208
209	ROOT_DEV = Root_RAM0;
210	return PFN_UP(end);
211disable:
212	initrd_start = 0;
213	initrd_end = 0;
214	return 0;
215}
216
 217/* In some conditions (e.g. a big-endian bootloader with a little-endian
 218   kernel), the initrd might appear byte-swapped.  Try to detect this and
 219   byte-swap it if needed.  */
220static void __init maybe_bswap_initrd(void)
221{
222#if defined(CONFIG_CPU_CAVIUM_OCTEON)
223	u64 buf;
224
225	/* Check for CPIO signature */
226	if (!memcmp((void *)initrd_start, "070701", 6))
227		return;
228
229	/* Check for compressed initrd */
230	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
231		return;
232
233	/* Try again with a byte swapped header */
234	buf = swab64p((u64 *)initrd_start);
235	if (!memcmp(&buf, "070701", 6) ||
236	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
237		unsigned long i;
238
239		pr_info("Byteswapped initrd detected\n");
240		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
241			swab64s((u64 *)i);
242	}
243#endif
244}
245
246static void __init finalize_initrd(void)
247{
248	unsigned long size = initrd_end - initrd_start;
249
250	if (size == 0) {
251		printk(KERN_INFO "Initrd not found or empty");
252		goto disable;
253	}
254	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
255		printk(KERN_ERR "Initrd extends beyond end of memory");
256		goto disable;
257	}
258
259	maybe_bswap_initrd();
260
261	memblock_reserve(__pa(initrd_start), size);
262	initrd_below_start_ok = 1;
263
264	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
265		initrd_start, size);
266	return;
267disable:
268	printk(KERN_CONT " - disabling initrd\n");
269	initrd_start = 0;
270	initrd_end = 0;
271}
272
273#else  /* !CONFIG_BLK_DEV_INITRD */
274
275static unsigned long __init init_initrd(void)
276{
277	return 0;
278}
279
280#define finalize_initrd()	do {} while (0)
281
282#endif
283
284/*
 285 * Initialize the bootmem allocator. It also sets up initrd-related
 286 * data if needed.
287 */
288#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))
289
290static void __init bootmem_init(void)
291{
292	init_initrd();
293	finalize_initrd();
294}
295
296#else  /* !CONFIG_SGI_IP27 */
297
298static void __init bootmem_init(void)
299{
300	struct memblock_region *mem;
301	phys_addr_t ramstart, ramend;
302
303	ramstart = memblock_start_of_DRAM();
304	ramend = memblock_end_of_DRAM();
305
306	/*
307	 * Sanity check any INITRD first. We don't take it into account
308	 * for bootmem setup initially, rely on the end-of-kernel-code
309	 * as our memory range starting point. Once bootmem is inited we
310	 * will reserve the area used for the initrd.
311	 */
312	init_initrd();
313
314	/* Reserve memory occupied by kernel. */
315	memblock_reserve(__pa_symbol(&_text),
316			__pa_symbol(&_end) - __pa_symbol(&_text));
317
318	/* max_low_pfn is not a number of pages but the end pfn of low mem */
319
320#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
321	ARCH_PFN_OFFSET = PFN_UP(ramstart);
322#else
323	/*
324	 * Reserve any memory between the start of RAM and PHYS_OFFSET
325	 */
326	if (ramstart > PHYS_OFFSET)
327		memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
328
329	if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
330		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
331			(unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
332			(unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
333	}
334#endif
335
336	min_low_pfn = ARCH_PFN_OFFSET;
337	max_pfn = PFN_DOWN(ramend);
338	for_each_memblock(memory, mem) {
339		unsigned long start = memblock_region_memory_base_pfn(mem);
340		unsigned long end = memblock_region_memory_end_pfn(mem);
341
342		/*
343		 * Skip highmem here so we get an accurate max_low_pfn if low
344		 * memory stops short of high memory.
345		 * If the region overlaps HIGHMEM_START, end is clipped so
 346		 * max_low_pfn excludes the highmem portion.
347		 */
348		if (memblock_is_nomap(mem))
349			continue;
350		if (start >= PFN_DOWN(HIGHMEM_START))
351			continue;
352		if (end > PFN_DOWN(HIGHMEM_START))
353			end = PFN_DOWN(HIGHMEM_START);
354		if (end > max_low_pfn)
355			max_low_pfn = end;
356	}
357
358	if (min_low_pfn >= max_low_pfn)
359		panic("Incorrect memory mapping !!!");
360
361	if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
362#ifdef CONFIG_HIGHMEM
363		highstart_pfn = PFN_DOWN(HIGHMEM_START);
364		highend_pfn = max_pfn;
365#else
366		max_low_pfn = PFN_DOWN(HIGHMEM_START);
367		max_pfn = max_low_pfn;
368#endif
369	}
370
371
372	/*
 373	 * In any case, the memory regions added to memblock
 374	 * (highmem/lowmem, available/reserved, etc.) are considered
 375	 * present, so inform sparsemem about them.
376	 */
377	memblocks_present();
378
379	/*
380	 * Reserve initrd memory if needed.
381	 */
382	finalize_initrd();
383}
384
385#endif	/* CONFIG_SGI_IP27 */
386
387static int usermem __initdata;
388
389static int __init early_parse_mem(char *p)
390{
391	phys_addr_t start, size;
392
393	/*
394	 * If a user specifies memory size, we
395	 * blow away any automatically generated
396	 * size.
397	 */
398	if (usermem == 0) {
399		usermem = 1;
400		memblock_remove(memblock_start_of_DRAM(),
401			memblock_end_of_DRAM() - memblock_start_of_DRAM());
402	}
403	start = 0;
404	size = memparse(p, &p);
405	if (*p == '@')
406		start = memparse(p + 1, &p);
407
408	add_memory_region(start, size, BOOT_MEM_RAM);
409
410	return 0;
411}
412early_param("mem", early_parse_mem);
413
414static int __init early_parse_memmap(char *p)
415{
416	char *oldp;
417	u64 start_at, mem_size;
418
419	if (!p)
420		return -EINVAL;
421
422	if (!strncmp(p, "exactmap", 8)) {
423		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
424		return 0;
425	}
426
427	oldp = p;
428	mem_size = memparse(p, &p);
429	if (p == oldp)
430		return -EINVAL;
431
432	if (*p == '@') {
433		start_at = memparse(p+1, &p);
434		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
435	} else if (*p == '#') {
436		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
437		return -EINVAL;
438	} else if (*p == '$') {
439		start_at = memparse(p+1, &p);
440		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
441	} else {
442		pr_err("\"memmap\" invalid format!\n");
443		return -EINVAL;
444	}
445
446	if (*p == '\0') {
447		usermem = 1;
448		return 0;
449	} else
450		return -EINVAL;
451}
452early_param("memmap", early_parse_memmap);
453
454#ifdef CONFIG_PROC_VMCORE
455unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
456static int __init early_parse_elfcorehdr(char *p)
457{
458	struct memblock_region *mem;
459
460	setup_elfcorehdr = memparse(p, &p);
461
462	 for_each_memblock(memory, mem) {
463		unsigned long start = mem->base;
464		unsigned long end = start + mem->size;
465		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
466			/*
467			 * Reserve from the elf core header to the end of
468			 * the memory segment, that should all be kdump
469			 * reserved memory.
470			 */
471			setup_elfcorehdr_size = end - setup_elfcorehdr;
472			break;
473		}
474	}
475	/*
476	 * If we don't find it in the memory map, then we shouldn't
477	 * have to worry about it, as the new kernel won't use it.
478	 */
479	return 0;
480}
481early_param("elfcorehdr", early_parse_elfcorehdr);
482#endif
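/*
 * A usage sketch (made-up address): the kdump capture kernel is booted
 * with "elfcorehdr=" pointing at the ELF core header that kexec placed
 * in the crash-reserved region, e.g.
 *
 *	elfcorehdr=0x04000000
 *
 * The size computed here is then used by arch_mem_init() to
 * memblock_reserve() the segment so the capture kernel doesn't
 * overwrite it.
 */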
483
484#ifdef CONFIG_KEXEC
485static void __init mips_parse_crashkernel(void)
486{
487	unsigned long long total_mem;
488	unsigned long long crash_size, crash_base;
489	int ret;
490
491	total_mem = memblock_phys_mem_size();
492	ret = parse_crashkernel(boot_command_line, total_mem,
493				&crash_size, &crash_base);
494	if (ret != 0 || crash_size <= 0)
495		return;
496
497	if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
498		pr_warn("Invalid memory region reserved for crash kernel\n");
499		return;
500	}
501
502	crashk_res.start = crash_base;
503	crashk_res.end	 = crash_base + crash_size - 1;
504}
505
506static void __init request_crashkernel(struct resource *res)
507{
508	int ret;
509
510	if (crashk_res.start == crashk_res.end)
511		return;
512
513	ret = request_resource(res, &crashk_res);
514	if (!ret)
515		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
516			(unsigned long)((crashk_res.end -
517					 crashk_res.start + 1) >> 20),
518			(unsigned long)(crashk_res.start  >> 20));
519}
520#else /* !defined(CONFIG_KEXEC)		*/
521static void __init mips_parse_crashkernel(void)
522{
523}
524
525static void __init request_crashkernel(struct resource *res)
526{
527}
528#endif /* !defined(CONFIG_KEXEC)  */
529
530static void __init check_kernel_sections_mem(void)
531{
532	phys_addr_t start = PFN_PHYS(PFN_DOWN(__pa_symbol(&_text)));
533	phys_addr_t size = PFN_PHYS(PFN_UP(__pa_symbol(&_end))) - start;
534
535	if (!memblock_is_region_memory(start, size)) {
536		pr_info("Kernel sections are not in the memory maps\n");
537		memblock_add(start, size);
538	}
539}
540
541#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
542#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
543#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
544#define BUILTIN_EXTEND_WITH_PROM	\
545	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
546
547/*
548 * arch_mem_init - initialize memory management subsystem
549 *
550 *  o plat_mem_setup() detects the memory configuration and will record detected
551 *    memory areas using add_memory_region.
552 *
553 * At this stage the memory configuration of the system is known to the
 554 * kernel, but the generic memory management system is still entirely uninitialized.
555 *
556 *  o bootmem_init()
557 *  o sparse_init()
558 *  o paging_init()
559 *  o dma_contiguous_reserve()
560 *
561 * At this stage the bootmem allocator is ready to use.
562 *
563 * NOTE: historically plat_mem_setup did the entire platform initialization.
564 *	 This was rather impractical because it meant plat_mem_setup had to
 565 * get by without any kind of memory allocator.  To keep old code from
 566 * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
567 * initialization hook for anything else was introduced.
568 */
569static void __init arch_mem_init(char **cmdline_p)
570{
571	extern void plat_mem_setup(void);
572
573	/*
574	 * Initialize boot_command_line to an innocuous but non-empty string in
575	 * order to prevent early_init_dt_scan_chosen() from copying
576	 * CONFIG_CMDLINE into it without our knowledge. We handle
577	 * CONFIG_CMDLINE ourselves below & don't want to duplicate its
578	 * content because repeating arguments can be problematic.
579	 */
580	strlcpy(boot_command_line, " ", COMMAND_LINE_SIZE);
581
582	/* call board setup routine */
583	plat_mem_setup();
584	memblock_set_bottom_up(true);
585
586#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
587	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
588#else
589	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
590	    (USE_DTB_CMDLINE && !boot_command_line[0]))
591		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
592
593	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
594		if (boot_command_line[0])
595			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
596		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
597	}
598
599#if defined(CONFIG_CMDLINE_BOOL)
600	if (builtin_cmdline[0]) {
601		if (boot_command_line[0])
602			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
603		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
604	}
605
606	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
607		if (boot_command_line[0])
608			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
609		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
610	}
611#endif
612#endif
613	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
614
615	*cmdline_p = command_line;
616
617	parse_early_param();
618
619	if (usermem)
620		pr_info("User-defined physical RAM map overwrite\n");
621
622	check_kernel_sections_mem();
623
624	early_init_fdt_reserve_self();
625	early_init_fdt_scan_reserved_mem();
626
627#ifndef CONFIG_NUMA
628	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
629#endif
630	bootmem_init();
631
632	/*
633	 * Prevent memblock from allocating high memory.
 634	 * This cannot be done before max_low_pfn is detected, so up
 635	 * to this point it is only possible to reserve physical memory
 636	 * with memblock_reserve; memblock_alloc* can be used
 637	 * only after this point.
638	 */
639	memblock_set_current_limit(PFN_PHYS(max_low_pfn));
640
641#ifdef CONFIG_PROC_VMCORE
642	if (setup_elfcorehdr && setup_elfcorehdr_size) {
643		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
644		       setup_elfcorehdr, setup_elfcorehdr_size);
645		memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size);
646	}
647#endif
648
649	mips_parse_crashkernel();
650#ifdef CONFIG_KEXEC
651	if (crashk_res.start != crashk_res.end)
652		memblock_reserve(crashk_res.start,
653				 crashk_res.end - crashk_res.start + 1);
654#endif
655	device_tree_init();
656	sparse_init();
657	plat_swiotlb_setup();
658
659	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
660
661	/* Reserve for hibernation. */
662	memblock_reserve(__pa_symbol(&__nosave_begin),
663		__pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
664
665	fdt_init_reserved_mem();
666
667	memblock_dump_all();
668
669	early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
670}
671
672static void __init resource_init(void)
673{
674	struct memblock_region *region;
675
676	if (UNCAC_BASE != IO_BASE)
677		return;
678
679	code_resource.start = __pa_symbol(&_text);
680	code_resource.end = __pa_symbol(&_etext) - 1;
681	data_resource.start = __pa_symbol(&_etext);
682	data_resource.end = __pa_symbol(&_edata) - 1;
683	bss_resource.start = __pa_symbol(&__bss_start);
684	bss_resource.end = __pa_symbol(&__bss_stop) - 1;
685
686	for_each_memblock(memory, region) {
687		phys_addr_t start = PFN_PHYS(memblock_region_memory_base_pfn(region));
688		phys_addr_t end = PFN_PHYS(memblock_region_memory_end_pfn(region)) - 1;
689		struct resource *res;
690
691		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
692		if (!res)
693			panic("%s: Failed to allocate %zu bytes\n", __func__,
694			      sizeof(struct resource));
695
696		res->start = start;
697		res->end = end;
698		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
699		res->name = "System RAM";
700
701		request_resource(&iomem_resource, res);
702
703		/*
704		 *  We don't know which RAM region contains kernel data,
705		 *  so we try it repeatedly and let the resource manager
706		 *  test it.
707		 */
708		request_resource(res, &code_resource);
709		request_resource(res, &data_resource);
710		request_resource(res, &bss_resource);
711		request_crashkernel(res);
712	}
713}
714
715#ifdef CONFIG_SMP
716static void __init prefill_possible_map(void)
717{
718	int i, possible = num_possible_cpus();
719
720	if (possible > nr_cpu_ids)
721		possible = nr_cpu_ids;
722
723	for (i = 0; i < possible; i++)
724		set_cpu_possible(i, true);
725	for (; i < NR_CPUS; i++)
726		set_cpu_possible(i, false);
727
728	nr_cpu_ids = possible;
729}
730#else
731static inline void prefill_possible_map(void) {}
732#endif
733
734void __init setup_arch(char **cmdline_p)
735{
736	cpu_probe();
737	mips_cm_probe();
738	prom_init();
739
740	setup_early_fdc_console();
741#ifdef CONFIG_EARLY_PRINTK
742	setup_early_printk();
743#endif
744	cpu_report();
745	check_bugs_early();
746
747#if defined(CONFIG_VT)
748#if defined(CONFIG_VGA_CONSOLE)
749	conswitchp = &vga_con;
750#elif defined(CONFIG_DUMMY_CONSOLE)
751	conswitchp = &dummy_con;
752#endif
753#endif
754
755	arch_mem_init(cmdline_p);
756
757	resource_init();
758	plat_smp_setup();
759	prefill_possible_map();
760
761	cpu_cache_init();
762	paging_init();
763}
764
765unsigned long kernelsp[NR_CPUS];
766unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
767
768#ifdef CONFIG_USE_OF
769unsigned long fw_passed_dtb;
770#endif
771
772#ifdef CONFIG_DEBUG_FS
773struct dentry *mips_debugfs_dir;
774static int __init debugfs_mips(void)
775{
776	mips_debugfs_dir = debugfs_create_dir("mips", NULL);
777	return 0;
778}
779arch_initcall(debugfs_mips);
780#endif
781
782#ifdef CONFIG_DMA_MAYBE_COHERENT
783/* User defined DMA coherency from command line. */
784enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
785EXPORT_SYMBOL_GPL(coherentio);
786int hw_coherentio = 0;	/* Actual hardware supported DMA coherency setting. */
787
788static int __init setcoherentio(char *str)
789{
790	coherentio = IO_COHERENCE_ENABLED;
791	pr_info("Hardware DMA cache coherency (command line)\n");
792	return 0;
793}
794early_param("coherentio", setcoherentio);
795
796static int __init setnocoherentio(char *str)
797{
798	coherentio = IO_COHERENCE_DISABLED;
799	pr_info("Software DMA cache coherency (command line)\n");
800	return 0;
801}
802early_param("nocoherentio", setnocoherentio);
803#endif