v4.6
   1/*
   2 *  linux/arch/arm/kernel/setup.c
   3 *
   4 *  Copyright (C) 1995-2001 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/efi.h>
  11#include <linux/export.h>
  12#include <linux/kernel.h>
  13#include <linux/stddef.h>
  14#include <linux/ioport.h>
  15#include <linux/delay.h>
  16#include <linux/utsname.h>
  17#include <linux/initrd.h>
  18#include <linux/console.h>
  19#include <linux/bootmem.h>
  20#include <linux/seq_file.h>
  21#include <linux/screen_info.h>
  22#include <linux/of_iommu.h>
  23#include <linux/of_platform.h>
  24#include <linux/init.h>
  25#include <linux/kexec.h>
  26#include <linux/of_fdt.h>
  27#include <linux/cpu.h>
  28#include <linux/interrupt.h>
  29#include <linux/smp.h>
  30#include <linux/proc_fs.h>
  31#include <linux/memblock.h>
  32#include <linux/bug.h>
  33#include <linux/compiler.h>
  34#include <linux/sort.h>
  35#include <linux/psci.h>
  36
  37#include <asm/unified.h>
  38#include <asm/cp15.h>
  39#include <asm/cpu.h>
  40#include <asm/cputype.h>
  41#include <asm/efi.h>
  42#include <asm/elf.h>
  43#include <asm/early_ioremap.h>
  44#include <asm/fixmap.h>
  45#include <asm/procinfo.h>
  46#include <asm/psci.h>
  47#include <asm/sections.h>
  48#include <asm/setup.h>
  49#include <asm/smp_plat.h>
  50#include <asm/mach-types.h>
  51#include <asm/cacheflush.h>
  52#include <asm/cachetype.h>
  53#include <asm/tlbflush.h>
  54#include <asm/xen/hypervisor.h>
  55
  56#include <asm/prom.h>
  57#include <asm/mach/arch.h>
  58#include <asm/mach/irq.h>
  59#include <asm/mach/time.h>
  60#include <asm/system_info.h>
  61#include <asm/system_misc.h>
  62#include <asm/traps.h>
  63#include <asm/unwind.h>
  64#include <asm/memblock.h>
  65#include <asm/virt.h>
  66
  67#include "atags.h"
  68
  69
  70#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
  71char fpe_type[8];
  72
  73static int __init fpe_setup(char *line)
  74{
  75	memcpy(fpe_type, line, 8);
  76	return 1;
  77}
  78
  79__setup("fpe=", fpe_setup);
  80#endif
  81
  82extern void init_default_cache_policy(unsigned long);
  83extern void paging_init(const struct machine_desc *desc);
  84extern void early_paging_init(const struct machine_desc *);
  85extern void sanity_check_meminfo(void);
  86extern enum reboot_mode reboot_mode;
  87extern void setup_dma_zone(const struct machine_desc *desc);
  88
  89unsigned int processor_id;
  90EXPORT_SYMBOL(processor_id);
  91unsigned int __machine_arch_type __read_mostly;
  92EXPORT_SYMBOL(__machine_arch_type);
  93unsigned int cacheid __read_mostly;
  94EXPORT_SYMBOL(cacheid);
  95
  96unsigned int __atags_pointer __initdata;
  97
  98unsigned int system_rev;
  99EXPORT_SYMBOL(system_rev);
 100
 101const char *system_serial;
 102EXPORT_SYMBOL(system_serial);
 103
 104unsigned int system_serial_low;
 105EXPORT_SYMBOL(system_serial_low);
 106
 107unsigned int system_serial_high;
 108EXPORT_SYMBOL(system_serial_high);
 109
 110unsigned int elf_hwcap __read_mostly;
 111EXPORT_SYMBOL(elf_hwcap);
 112
 113unsigned int elf_hwcap2 __read_mostly;
 114EXPORT_SYMBOL(elf_hwcap2);
 115
 116
 117#ifdef MULTI_CPU
 118struct processor processor __read_mostly;
 119#endif
 120#ifdef MULTI_TLB
 121struct cpu_tlb_fns cpu_tlb __read_mostly;
 122#endif
 123#ifdef MULTI_USER
 124struct cpu_user_fns cpu_user __read_mostly;
 125#endif
 126#ifdef MULTI_CACHE
 127struct cpu_cache_fns cpu_cache __read_mostly;
 128#endif
 129#ifdef CONFIG_OUTER_CACHE
 130struct outer_cache_fns outer_cache __read_mostly;
 131EXPORT_SYMBOL(outer_cache);
 132#endif
 133
 134/*
 135 * Cached cpu_architecture() result for use by assembler code.
 136 * C code should use the cpu_architecture() function instead of accessing this
 137 * variable directly.
 138 */
 139int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
 140
 141struct stack {
 142	u32 irq[3];
 143	u32 abt[3];
 144	u32 und[3];
 145	u32 fiq[3];
 146} ____cacheline_aligned;
 147
 148#ifndef CONFIG_CPU_V7M
 149static struct stack stacks[NR_CPUS];
 150#endif
 151
 152char elf_platform[ELF_PLATFORM_SIZE];
 153EXPORT_SYMBOL(elf_platform);
 154
 155static const char *cpu_name;
 156static const char *machine_name;
 157static char __initdata cmd_line[COMMAND_LINE_SIZE];
 158const struct machine_desc *machine_desc __initdata;
 159
 160static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
 161#define ENDIANNESS ((char)endian_test.l)
 162
 163DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
 164
 165/*
 166 * Standard memory resources
 167 */
 168static struct resource mem_res[] = {
 169	{
 170		.name = "Video RAM",
 171		.start = 0,
 172		.end = 0,
 173		.flags = IORESOURCE_MEM
 174	},
 175	{
 176		.name = "Kernel code",
 177		.start = 0,
 178		.end = 0,
 179		.flags = IORESOURCE_SYSTEM_RAM
 180	},
 181	{
 182		.name = "Kernel data",
 183		.start = 0,
 184		.end = 0,
 185		.flags = IORESOURCE_SYSTEM_RAM
 186	}
 187};
 188
 189#define video_ram   mem_res[0]
 190#define kernel_code mem_res[1]
 191#define kernel_data mem_res[2]
 192
 193static struct resource io_res[] = {
 194	{
 195		.name = "reserved",
 196		.start = 0x3bc,
 197		.end = 0x3be,
 198		.flags = IORESOURCE_IO | IORESOURCE_BUSY
 199	},
 200	{
 201		.name = "reserved",
 202		.start = 0x378,
 203		.end = 0x37f,
 204		.flags = IORESOURCE_IO | IORESOURCE_BUSY
 205	},
 206	{
 207		.name = "reserved",
 208		.start = 0x278,
 209		.end = 0x27f,
 210		.flags = IORESOURCE_IO | IORESOURCE_BUSY
 211	}
 212};
 213
 214#define lp0 io_res[0]
 215#define lp1 io_res[1]
 216#define lp2 io_res[2]
 217
 218static const char *proc_arch[] = {
 219	"undefined/unknown",
 220	"3",
 221	"4",
 222	"4T",
 223	"5",
 224	"5T",
 225	"5TE",
 226	"5TEJ",
 227	"6TEJ",
 228	"7",
 229	"7M",
 230	"?(12)",
 231	"?(13)",
 232	"?(14)",
 233	"?(15)",
 234	"?(16)",
 235	"?(17)",
 236};
 237
 238#ifdef CONFIG_CPU_V7M
 239static int __get_cpu_architecture(void)
 240{
 241	return CPU_ARCH_ARMv7M;
 242}
 243#else
 244static int __get_cpu_architecture(void)
 245{
 246	int cpu_arch;
 247
 248	if ((read_cpuid_id() & 0x0008f000) == 0) {
 249		cpu_arch = CPU_ARCH_UNKNOWN;
 250	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
 251		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
 252	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
 253		cpu_arch = (read_cpuid_id() >> 16) & 7;
 254		if (cpu_arch)
 255			cpu_arch += CPU_ARCH_ARMv3;
 256	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
 257		/* Revised CPUID format. Read the Memory Model Feature
 258		 * Register 0 and check for VMSAv7 or PMSAv7 */
 259		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
 260		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
 261		    (mmfr0 & 0x000000f0) >= 0x00000030)
 262			cpu_arch = CPU_ARCH_ARMv7;
 263		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 264			 (mmfr0 & 0x000000f0) == 0x00000020)
 265			cpu_arch = CPU_ARCH_ARMv6;
 266		else
 267			cpu_arch = CPU_ARCH_UNKNOWN;
 268	} else
 269		cpu_arch = CPU_ARCH_UNKNOWN;
 270
 271	return cpu_arch;
 272}
 273#endif
 274
 275int __pure cpu_architecture(void)
 276{
 277	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
 278
 279	return __cpu_architecture;
 280}
 281
 282static int cpu_has_aliasing_icache(unsigned int arch)
 283{
 284	int aliasing_icache;
 285	unsigned int id_reg, num_sets, line_size;
 286
 287	/* PIPT caches never alias. */
 288	if (icache_is_pipt())
 289		return 0;
 290
 291	/* arch specifies the register format */
 292	switch (arch) {
 293	case CPU_ARCH_ARMv7:
 294		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
 295		    : /* No output operands */
 296		    : "r" (1));
 297		isb();
 298		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
 299		    : "=r" (id_reg));
 300		line_size = 4 << ((id_reg & 0x7) + 2);
 301		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
 302		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
 303		break;
 304	case CPU_ARCH_ARMv6:
 305		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
 306		break;
 307	default:
 308		/* I-cache aliases will be handled by D-cache aliasing code */
 309		aliasing_icache = 0;
 310	}
 311
 312	return aliasing_icache;
 313}
 314
 315static void __init cacheid_init(void)
 316{
 317	unsigned int arch = cpu_architecture();
 318
 319	if (arch == CPU_ARCH_ARMv7M) {
 320		cacheid = 0;
 321	} else if (arch >= CPU_ARCH_ARMv6) {
 322		unsigned int cachetype = read_cpuid_cachetype();
 323		if ((cachetype & (7 << 29)) == 4 << 29) {
 324			/* ARMv7 register format */
 325			arch = CPU_ARCH_ARMv7;
 326			cacheid = CACHEID_VIPT_NONALIASING;
 327			switch (cachetype & (3 << 14)) {
 328			case (1 << 14):
 329				cacheid |= CACHEID_ASID_TAGGED;
 330				break;
 331			case (3 << 14):
 332				cacheid |= CACHEID_PIPT;
 333				break;
 334			}
 335		} else {
 336			arch = CPU_ARCH_ARMv6;
 337			if (cachetype & (1 << 23))
 338				cacheid = CACHEID_VIPT_ALIASING;
 339			else
 340				cacheid = CACHEID_VIPT_NONALIASING;
 341		}
 342		if (cpu_has_aliasing_icache(arch))
 343			cacheid |= CACHEID_VIPT_I_ALIASING;
 344	} else {
 345		cacheid = CACHEID_VIVT;
 346	}
 347
 348	pr_info("CPU: %s data cache, %s instruction cache\n",
 349		cache_is_vivt() ? "VIVT" :
 350		cache_is_vipt_aliasing() ? "VIPT aliasing" :
 351		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
 352		cache_is_vivt() ? "VIVT" :
 353		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
 354		icache_is_vipt_aliasing() ? "VIPT aliasing" :
 355		icache_is_pipt() ? "PIPT" :
 356		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
 357}
 358
 359/*
 360 * These functions re-use the assembly code in head.S, which
 361 * already provide the required functionality.
 362 */
 363extern struct proc_info_list *lookup_processor_type(unsigned int);
 364
 365void __init early_print(const char *str, ...)
 366{
 367	extern void printascii(const char *);
 368	char buf[256];
 369	va_list ap;
 370
 371	va_start(ap, str);
 372	vsnprintf(buf, sizeof(buf), str, ap);
 373	va_end(ap);
 374
 375#ifdef CONFIG_DEBUG_LL
 376	printascii(buf);
 377#endif
 378	printk("%s", buf);
 379}
 380
 381#ifdef CONFIG_ARM_PATCH_IDIV
 382
 383static inline u32 __attribute_const__ sdiv_instruction(void)
 384{
 385	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
 386		/* "sdiv r0, r0, r1" */
 387		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
 388		return __opcode_to_mem_thumb32(insn);
 389	}
 390
 391	/* "sdiv r0, r0, r1" */
 392	return __opcode_to_mem_arm(0xe710f110);
 393}
 394
 395static inline u32 __attribute_const__ udiv_instruction(void)
 396{
 397	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
 398		/* "udiv r0, r0, r1" */
 399		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
 400		return __opcode_to_mem_thumb32(insn);
 401	}
 402
 403	/* "udiv r0, r0, r1" */
 404	return __opcode_to_mem_arm(0xe730f110);
 405}
 406
 407static inline u32 __attribute_const__ bx_lr_instruction(void)
 408{
 409	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
 410		/* "bx lr; nop" */
 411		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
 412		return __opcode_to_mem_thumb32(insn);
 413	}
 414
 415	/* "bx lr" */
 416	return __opcode_to_mem_arm(0xe12fff1e);
 417}
 418
 419static void __init patch_aeabi_idiv(void)
 420{
 421	extern void __aeabi_uidiv(void);
 422	extern void __aeabi_idiv(void);
 423	uintptr_t fn_addr;
 424	unsigned int mask;
 425
 426	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
 427	if (!(elf_hwcap & mask))
 428		return;
 429
 430	pr_info("CPU: div instructions available: patching division code\n");
 431
 432	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
 433	asm ("" : "+g" (fn_addr));
 434	((u32 *)fn_addr)[0] = udiv_instruction();
 435	((u32 *)fn_addr)[1] = bx_lr_instruction();
 436	flush_icache_range(fn_addr, fn_addr + 8);
 437
 438	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
 439	asm ("" : "+g" (fn_addr));
 440	((u32 *)fn_addr)[0] = sdiv_instruction();
 441	((u32 *)fn_addr)[1] = bx_lr_instruction();
 442	flush_icache_range(fn_addr, fn_addr + 8);
 443}
 444
 445#else
 446static inline void patch_aeabi_idiv(void) { }
 447#endif
 448
 449static void __init cpuid_init_hwcaps(void)
 450{
 451	int block;
 452	u32 isar5;
 453
 454	if (cpu_architecture() < CPU_ARCH_ARMv7)
 455		return;
 456
 457	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
 458	if (block >= 2)
 459		elf_hwcap |= HWCAP_IDIVA;
 460	if (block >= 1)
 461		elf_hwcap |= HWCAP_IDIVT;
 462
 463	/* LPAE implies atomic ldrd/strd instructions */
 464	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
 465	if (block >= 5)
 466		elf_hwcap |= HWCAP_LPAE;
 467
 468	/* check for supported v8 Crypto instructions */
 469	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
 470
 471	block = cpuid_feature_extract_field(isar5, 4);
 472	if (block >= 2)
 473		elf_hwcap2 |= HWCAP2_PMULL;
 474	if (block >= 1)
 475		elf_hwcap2 |= HWCAP2_AES;
 476
 477	block = cpuid_feature_extract_field(isar5, 8);
 478	if (block >= 1)
 479		elf_hwcap2 |= HWCAP2_SHA1;
 480
 481	block = cpuid_feature_extract_field(isar5, 12);
 482	if (block >= 1)
 483		elf_hwcap2 |= HWCAP2_SHA2;
 484
 485	block = cpuid_feature_extract_field(isar5, 16);
 486	if (block >= 1)
 487		elf_hwcap2 |= HWCAP2_CRC32;
 488}
 489
 490static void __init elf_hwcap_fixup(void)
 491{
 492	unsigned id = read_cpuid_id();
 493
 494	/*
 495	 * HWCAP_TLS is available only on 1136 r1p0 and later,
 496	 * see also kuser_get_tls_init.
 497	 */
 498	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
 499	    ((id >> 20) & 3) == 0) {
 500		elf_hwcap &= ~HWCAP_TLS;
 501		return;
 502	}
 503
 504	/* Verify if CPUID scheme is implemented */
 505	if ((id & 0x000f0000) != 0x000f0000)
 506		return;
 507
 508	/*
 509	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
 510	 * avoid advertising SWP; it may not be atomic with
 511	 * multiprocessing cores.
 512	 */
 513	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
 514	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
 515	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
 516		elf_hwcap &= ~HWCAP_SWP;
 517}
 518
 519/*
 520 * cpu_init - initialise one CPU.
 521 *
 522 * cpu_init sets up the per-CPU stacks.
 523 */
 524void notrace cpu_init(void)
 525{
 526#ifndef CONFIG_CPU_V7M
 527	unsigned int cpu = smp_processor_id();
 528	struct stack *stk = &stacks[cpu];
 529
 530	if (cpu >= NR_CPUS) {
 531		pr_crit("CPU%u: bad primary CPU number\n", cpu);
 532		BUG();
 533	}
 534
 535	/*
 536	 * This only works on resume and secondary cores. For booting on the
 537	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
 538	 */
 539	set_my_cpu_offset(per_cpu_offset(cpu));
 540
 541	cpu_proc_init();
 542
 543	/*
 544	 * Define the placement constraint for the inline asm directive below.
 545	 * In Thumb-2, msr with an immediate value is not allowed.
 546	 */
 547#ifdef CONFIG_THUMB2_KERNEL
 548#define PLC	"r"
 549#else
 550#define PLC	"I"
 551#endif
 552
 553	/*
 554	 * setup stacks for re-entrant exception handlers
 555	 */
 556	__asm__ (
 557	"msr	cpsr_c, %1\n\t"
 558	"add	r14, %0, %2\n\t"
 559	"mov	sp, r14\n\t"
 560	"msr	cpsr_c, %3\n\t"
 561	"add	r14, %0, %4\n\t"
 562	"mov	sp, r14\n\t"
 563	"msr	cpsr_c, %5\n\t"
 564	"add	r14, %0, %6\n\t"
 565	"mov	sp, r14\n\t"
 566	"msr	cpsr_c, %7\n\t"
 567	"add	r14, %0, %8\n\t"
 568	"mov	sp, r14\n\t"
 569	"msr	cpsr_c, %9"
 570	    :
 571	    : "r" (stk),
 572	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
 573	      "I" (offsetof(struct stack, irq[0])),
 574	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
 575	      "I" (offsetof(struct stack, abt[0])),
 576	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
 577	      "I" (offsetof(struct stack, und[0])),
 578	      PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
 579	      "I" (offsetof(struct stack, fiq[0])),
 580	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 581	    : "r14");
 582#endif
 583}
 584
 585u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
 586
 587void __init smp_setup_processor_id(void)
 588{
 589	int i;
 590	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
 591	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 592
 593	cpu_logical_map(0) = cpu;
 594	for (i = 1; i < nr_cpu_ids; ++i)
 595		cpu_logical_map(i) = i == cpu ? 0 : i;
 596
 597	/*
 598	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
 599	 * using percpu variable early, for example, lockdep will
 600	 * access percpu variable inside lock_release
 601	 */
 602	set_my_cpu_offset(0);
 603
 604	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
 605}
 606
 607struct mpidr_hash mpidr_hash;
 608#ifdef CONFIG_SMP
 609/**
 610 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 611 *			  level in order to build a linear index from an
 612 *			  MPIDR value. Resulting algorithm is a collision
 613 *			  free hash carried out through shifting and ORing
 614 */
 615static void __init smp_build_mpidr_hash(void)
 616{
 617	u32 i, affinity;
 618	u32 fs[3], bits[3], ls, mask = 0;
 619	/*
 620	 * Pre-scan the list of MPIDRs and filter out bits that do
 621	 * not contribute to affinity levels, i.e. they never toggle.
 622	 */
 623	for_each_possible_cpu(i)
 624		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
 625	pr_debug("mask of set bits 0x%x\n", mask);
 626	/*
 627	 * Find and stash the last and first bit set at all affinity levels to
 628	 * check how many bits are required to represent them.
 629	 */
 630	for (i = 0; i < 3; i++) {
 631		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
 632		/*
 633		 * Find the MSB bit and LSB bits position
 634		 * to determine how many bits are required
 635		 * to express the affinity level.
 636		 */
 637		ls = fls(affinity);
 638		fs[i] = affinity ? ffs(affinity) - 1 : 0;
 639		bits[i] = ls - fs[i];
 640	}
 641	/*
 642	 * An index can be created from the MPIDR by isolating the
 643	 * significant bits at each affinity level and by shifting
 644	 * them in order to compress the 24 bits values space to a
 645	 * compressed set of values. This is equivalent to hashing
 646	 * the MPIDR through shifting and ORing. It is a collision free
 647	 * hash though not minimal since some levels might contain a number
 648	 * of CPUs that is not an exact power of 2 and their bit
 649	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
 650	 */
 651	mpidr_hash.shift_aff[0] = fs[0];
 652	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
 653	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
 654						(bits[1] + bits[0]);
 655	mpidr_hash.mask = mask;
 656	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
 657	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
 658				mpidr_hash.shift_aff[0],
 659				mpidr_hash.shift_aff[1],
 660				mpidr_hash.shift_aff[2],
 661				mpidr_hash.mask,
 662				mpidr_hash.bits);
 663	/*
 664	 * 4x is an arbitrary value used to warn on a hash table much bigger
 665	 * than expected on most systems.
 666	 */
 667	if (mpidr_hash_size() > 4 * num_possible_cpus())
 668		pr_warn("Large number of MPIDR hash buckets detected\n");
 669	sync_cache_w(&mpidr_hash);
 670}
 671#endif
 672
 673static void __init setup_processor(void)
 674{
 675	struct proc_info_list *list;
 676
 677	/*
 678	 * locate processor in the list of supported processor
 679	 * types.  The linker builds this table for us from the
 680	 * entries in arch/arm/mm/proc-*.S
 681	 */
 682	list = lookup_processor_type(read_cpuid_id());
 683	if (!list) {
 684		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
 685		       read_cpuid_id());
 686		while (1);
 687	}
 688
 689	cpu_name = list->cpu_name;
 690	__cpu_architecture = __get_cpu_architecture();
 691
 692#ifdef MULTI_CPU
 693	processor = *list->proc;
 694#endif
 695#ifdef MULTI_TLB
 696	cpu_tlb = *list->tlb;
 697#endif
 698#ifdef MULTI_USER
 699	cpu_user = *list->user;
 700#endif
 701#ifdef MULTI_CACHE
 702	cpu_cache = *list->cache;
 703#endif
 704
 705	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
 706		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
 707		proc_arch[cpu_architecture()], get_cr());
 708
 709	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
 710		 list->arch_name, ENDIANNESS);
 711	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
 712		 list->elf_name, ENDIANNESS);
 713	elf_hwcap = list->elf_hwcap;
 714
 715	cpuid_init_hwcaps();
 716	patch_aeabi_idiv();
 717
 718#ifndef CONFIG_ARM_THUMB
 719	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 720#endif
 721#ifdef CONFIG_MMU
 722	init_default_cache_policy(list->__cpu_mm_mmu_flags);
 723#endif
 724	erratum_a15_798181_init();
 725
 726	elf_hwcap_fixup();
 727
 728	cacheid_init();
 729	cpu_init();
 730}
 731
 732void __init dump_machine_table(void)
 733{
 734	const struct machine_desc *p;
 735
 736	early_print("Available machine support:\n\nID (hex)\tNAME\n");
 737	for_each_machine_desc(p)
 738		early_print("%08x\t%s\n", p->nr, p->name);
 739
 740	early_print("\nPlease check your kernel config and/or bootloader.\n");
 741
 742	while (true)
 743		/* can't use cpu_relax() here as it may require MMU setup */;
 744}
 745
 746int __init arm_add_memory(u64 start, u64 size)
 747{
 748	u64 aligned_start;
 749
 750	/*
 751	 * Ensure that start/size are aligned to a page boundary.
 752	 * Size is rounded down, start is rounded up.
 753	 */
 754	aligned_start = PAGE_ALIGN(start);
 755	if (aligned_start > start + size)
 756		size = 0;
 757	else
 758		size -= aligned_start - start;
 759
 760#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
 761	if (aligned_start > ULONG_MAX) {
 762		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
 763			(long long)start);
 764		return -EINVAL;
 765	}
 766
 767	if (aligned_start + size > ULONG_MAX) {
 768		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
 769			(long long)start);
 770		/*
 771		 * To ensure bank->start + bank->size is representable in
 772		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
 773		 * This means we lose a page after masking.
 774		 */
 775		size = ULONG_MAX - aligned_start;
 776	}
 777#endif
 778
 779	if (aligned_start < PHYS_OFFSET) {
 780		if (aligned_start + size <= PHYS_OFFSET) {
 781			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
 782				aligned_start, aligned_start + size);
 783			return -EINVAL;
 784		}
 785
 786		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
 787			aligned_start, (u64)PHYS_OFFSET);
 788
 789		size -= PHYS_OFFSET - aligned_start;
 790		aligned_start = PHYS_OFFSET;
 791	}
 792
 793	start = aligned_start;
 794	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
 795
 796	/*
 797	 * Check whether this memory region has non-zero size or
 798	 * invalid node number.
 799	 */
 800	if (size == 0)
 801		return -EINVAL;
 802
 803	memblock_add(start, size);
 804	return 0;
 805}
 806
 807/*
 808 * Pick out the memory size.  We look for mem=size@start,
 809 * where start and size are "size[KkMm]"
 810 */
 811
 812static int __init early_mem(char *p)
 813{
 814	static int usermem __initdata = 0;
 815	u64 size;
 816	u64 start;
 817	char *endp;
 818
 819	/*
 820	 * If the user specifies memory size, we
 821	 * blow away any automatically generated
 822	 * size.
 823	 */
 824	if (usermem == 0) {
 825		usermem = 1;
 826		memblock_remove(memblock_start_of_DRAM(),
 827			memblock_end_of_DRAM() - memblock_start_of_DRAM());
 828	}
 829
 830	start = PHYS_OFFSET;
 831	size  = memparse(p, &endp);
 832	if (*endp == '@')
 833		start = memparse(endp + 1, NULL);
 834
 835	arm_add_memory(start, size);
 836
 837	return 0;
 838}
 839early_param("mem", early_mem);
 840
 841static void __init request_standard_resources(const struct machine_desc *mdesc)
 842{
 843	struct memblock_region *region;
 844	struct resource *res;
 845
 846	kernel_code.start   = virt_to_phys(_text);
 847	kernel_code.end     = virt_to_phys(_etext - 1);
 848	kernel_data.start   = virt_to_phys(_sdata);
 849	kernel_data.end     = virt_to_phys(_end - 1);
 850
 851	for_each_memblock(memory, region) {
 852		res = memblock_virt_alloc(sizeof(*res), 0);
 853		res->name  = "System RAM";
 854		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 855		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
 856		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
 857
 858		request_resource(&iomem_resource, res);
 859
 860		if (kernel_code.start >= res->start &&
 861		    kernel_code.end <= res->end)
 862			request_resource(res, &kernel_code);
 863		if (kernel_data.start >= res->start &&
 864		    kernel_data.end <= res->end)
 865			request_resource(res, &kernel_data);
 866	}
 867
 868	if (mdesc->video_start) {
 869		video_ram.start = mdesc->video_start;
 870		video_ram.end   = mdesc->video_end;
 871		request_resource(&iomem_resource, &video_ram);
 872	}
 873
 874	/*
 875	 * Some machines don't have the possibility of ever
 876	 * possessing lp0, lp1 or lp2
 877	 */
 878	if (mdesc->reserve_lp0)
 879		request_resource(&ioport_resource, &lp0);
 880	if (mdesc->reserve_lp1)
 881		request_resource(&ioport_resource, &lp1);
 882	if (mdesc->reserve_lp2)
 883		request_resource(&ioport_resource, &lp2);
 884}
 885
 886#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
 887struct screen_info screen_info = {
 888 .orig_video_lines	= 30,
 889 .orig_video_cols	= 80,
 890 .orig_video_mode	= 0,
 891 .orig_video_ega_bx	= 0,
 892 .orig_video_isVGA	= 1,
 893 .orig_video_points	= 8
 894};
 895#endif
 896
 897static int __init customize_machine(void)
 898{
 899	/*
 900	 * customizes platform devices, or adds new ones
 901	 * On DT based machines, we fall back to populating the
 902	 * machine from the device tree, if no callback is provided,
 903	 * otherwise we would always need an init_machine callback.
 904	 */
 905	of_iommu_init();
 906	if (machine_desc->init_machine)
 907		machine_desc->init_machine();
 908#ifdef CONFIG_OF
 909	else
 910		of_platform_populate(NULL, of_default_bus_match_table,
 911					NULL, NULL);
 912#endif
 913	return 0;
 914}
 915arch_initcall(customize_machine);
 916
 917static int __init init_machine_late(void)
 918{
 919	struct device_node *root;
 920	int ret;
 921
 922	if (machine_desc->init_late)
 923		machine_desc->init_late();
 924
 925	root = of_find_node_by_path("/");
 926	if (root) {
 927		ret = of_property_read_string(root, "serial-number",
 928					      &system_serial);
 929		if (ret)
 930			system_serial = NULL;
 931	}
 932
 933	if (!system_serial)
 934		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
 935					  system_serial_high,
 936					  system_serial_low);
 937
 938	return 0;
 939}
 940late_initcall(init_machine_late);
 941
 942#ifdef CONFIG_KEXEC
 943static inline unsigned long long get_total_mem(void)
 944{
 945	unsigned long total;
 946
 947	total = max_low_pfn - min_low_pfn;
 948	return total << PAGE_SHIFT;
 949}
 950
 951/**
 952 * reserve_crashkernel() - reserves memory area for crash kernel
 953 *
 954 * This function reserves memory area given in "crashkernel=" kernel command
 955 * line parameter. The memory reserved is used by a dump capture kernel when
 956 * primary kernel is crashing.
 957 */
 958static void __init reserve_crashkernel(void)
 959{
 960	unsigned long long crash_size, crash_base;
 961	unsigned long long total_mem;
 962	int ret;
 963
 964	total_mem = get_total_mem();
 965	ret = parse_crashkernel(boot_command_line, total_mem,
 966				&crash_size, &crash_base);
 967	if (ret)
 968		return;
 969
 970	ret = memblock_reserve(crash_base, crash_size);
 971	if (ret < 0) {
 972		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
 973			(unsigned long)crash_base);
 974		return;
 975	}
 976
 977	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
 978		(unsigned long)(crash_size >> 20),
 979		(unsigned long)(crash_base >> 20),
 980		(unsigned long)(total_mem >> 20));
 981
 982	crashk_res.start = crash_base;
 983	crashk_res.end = crash_base + crash_size - 1;
 984	insert_resource(&iomem_resource, &crashk_res);
 985}
 986#else
 987static inline void reserve_crashkernel(void) {}
 988#endif /* CONFIG_KEXEC */
 989
 990void __init hyp_mode_check(void)
 991{
 992#ifdef CONFIG_ARM_VIRT_EXT
 993	sync_boot_mode();
 994
 995	if (is_hyp_mode_available()) {
 996		pr_info("CPU: All CPU(s) started in HYP mode.\n");
 997		pr_info("CPU: Virtualization extensions available.\n");
 998	} else if (is_hyp_mode_mismatched()) {
 999		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1000			__boot_cpu_mode & MODE_MASK);
1001		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1002	} else
1003		pr_info("CPU: All CPU(s) started in SVC mode.\n");
1004#endif
1005}
1006
1007void __init setup_arch(char **cmdline_p)
1008{
1009	const struct machine_desc *mdesc;
1010
1011	setup_processor();
1012	mdesc = setup_machine_fdt(__atags_pointer);
1013	if (!mdesc)
1014		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
1015	machine_desc = mdesc;
1016	machine_name = mdesc->name;
1017	dump_stack_set_arch_desc("%s", mdesc->name);
1018
1019	if (mdesc->reboot_mode != REBOOT_HARD)
1020		reboot_mode = mdesc->reboot_mode;
1021
1022	init_mm.start_code = (unsigned long) _text;
1023	init_mm.end_code   = (unsigned long) _etext;
1024	init_mm.end_data   = (unsigned long) _edata;
1025	init_mm.brk	   = (unsigned long) _end;
1026
1027	/* populate cmd_line too for later use, preserving boot_command_line */
1028	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1029	*cmdline_p = cmd_line;
1030
1031	early_fixmap_init();
1032	early_ioremap_init();
1033
1034	parse_early_param();
1035
1036#ifdef CONFIG_MMU
1037	early_paging_init(mdesc);
1038#endif
1039	setup_dma_zone(mdesc);
1040	efi_init();
1041	sanity_check_meminfo();
1042	arm_memblock_init(mdesc);
1043
1044	early_ioremap_reset();
1045
1046	paging_init(mdesc);
1047	request_standard_resources(mdesc);
1048
1049	if (mdesc->restart)
1050		arm_pm_restart = mdesc->restart;
1051
1052	unflatten_device_tree();
1053
1054	arm_dt_init_cpu_maps();
1055	psci_dt_init();
1056	xen_early_init();
1057#ifdef CONFIG_SMP
1058	if (is_smp()) {
1059		if (!mdesc->smp_init || !mdesc->smp_init()) {
1060			if (psci_smp_available())
1061				smp_set_ops(&psci_smp_ops);
1062			else if (mdesc->smp)
1063				smp_set_ops(mdesc->smp);
1064		}
1065		smp_init_cpus();
1066		smp_build_mpidr_hash();
1067	}
1068#endif
1069
1070	if (!is_smp())
1071		hyp_mode_check();
1072
1073	reserve_crashkernel();
1074
1075#ifdef CONFIG_MULTI_IRQ_HANDLER
1076	handle_arch_irq = mdesc->handle_irq;
1077#endif
1078
1079#ifdef CONFIG_VT
1080#if defined(CONFIG_VGA_CONSOLE)
1081	conswitchp = &vga_con;
1082#elif defined(CONFIG_DUMMY_CONSOLE)
1083	conswitchp = &dummy_con;
1084#endif
1085#endif
1086
1087	if (mdesc->init_early)
1088		mdesc->init_early();
1089}
1090
1091
1092static int __init topology_init(void)
1093{
1094	int cpu;
1095
1096	for_each_possible_cpu(cpu) {
1097		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1098		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1099		register_cpu(&cpuinfo->cpu, cpu);
1100	}
1101
1102	return 0;
1103}
1104subsys_initcall(topology_init);
1105
1106#ifdef CONFIG_HAVE_PROC_CPU
1107static int __init proc_cpu_init(void)
1108{
1109	struct proc_dir_entry *res;
1110
1111	res = proc_mkdir("cpu", NULL);
1112	if (!res)
1113		return -ENOMEM;
1114	return 0;
1115}
1116fs_initcall(proc_cpu_init);
1117#endif
1118
1119static const char *hwcap_str[] = {
1120	"swp",
1121	"half",
1122	"thumb",
1123	"26bit",
1124	"fastmult",
1125	"fpa",
1126	"vfp",
1127	"edsp",
1128	"java",
1129	"iwmmxt",
1130	"crunch",
1131	"thumbee",
1132	"neon",
1133	"vfpv3",
1134	"vfpv3d16",
1135	"tls",
1136	"vfpv4",
1137	"idiva",
1138	"idivt",
1139	"vfpd32",
1140	"lpae",
1141	"evtstrm",
1142	NULL
1143};
1144
1145static const char *hwcap2_str[] = {
1146	"aes",
1147	"pmull",
1148	"sha1",
1149	"sha2",
1150	"crc32",
1151	NULL
1152};
1153
1154static int c_show(struct seq_file *m, void *v)
1155{
1156	int i, j;
1157	u32 cpuid;
1158
1159	for_each_online_cpu(i) {
1160		/*
1161		 * glibc reads /proc/cpuinfo to determine the number of
1162		 * online processors, looking for lines beginning with
1163		 * "processor".  Give glibc what it expects.
1164		 */
1165		seq_printf(m, "processor\t: %d\n", i);
1166		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1167		seq_printf(m, "model name\t: %s rev %d (%s)\n",
1168			   cpu_name, cpuid & 15, elf_platform);
1169
1170#if defined(CONFIG_SMP)
1171		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1172			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1173			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1174#else
1175		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1176			   loops_per_jiffy / (500000/HZ),
1177			   (loops_per_jiffy / (5000/HZ)) % 100);
1178#endif
1179		/* dump out the processor features */
1180		seq_puts(m, "Features\t: ");
1181
1182		for (j = 0; hwcap_str[j]; j++)
1183			if (elf_hwcap & (1 << j))
1184				seq_printf(m, "%s ", hwcap_str[j]);
1185
1186		for (j = 0; hwcap2_str[j]; j++)
1187			if (elf_hwcap2 & (1 << j))
1188				seq_printf(m, "%s ", hwcap2_str[j]);
1189
1190		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1191		seq_printf(m, "CPU architecture: %s\n",
1192			   proc_arch[cpu_architecture()]);
1193
1194		if ((cpuid & 0x0008f000) == 0x00000000) {
1195			/* pre-ARM7 */
1196			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1197		} else {
1198			if ((cpuid & 0x0008f000) == 0x00007000) {
1199				/* ARM7 */
1200				seq_printf(m, "CPU variant\t: 0x%02x\n",
1201					   (cpuid >> 16) & 127);
1202			} else {
1203				/* post-ARM7 */
1204				seq_printf(m, "CPU variant\t: 0x%x\n",
1205					   (cpuid >> 20) & 15);
1206			}
1207			seq_printf(m, "CPU part\t: 0x%03x\n",
1208				   (cpuid >> 4) & 0xfff);
1209		}
1210		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1211	}
1212
1213	seq_printf(m, "Hardware\t: %s\n", machine_name);
1214	seq_printf(m, "Revision\t: %04x\n", system_rev);
1215	seq_printf(m, "Serial\t\t: %s\n", system_serial);
1216
1217	return 0;
1218}
1219
1220static void *c_start(struct seq_file *m, loff_t *pos)
1221{
1222	return *pos < 1 ? (void *)1 : NULL;
1223}
1224
1225static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1226{
1227	++*pos;
1228	return NULL;
1229}
1230
1231static void c_stop(struct seq_file *m, void *v)
1232{
1233}
1234
1235const struct seq_operations cpuinfo_op = {
1236	.start	= c_start,
1237	.next	= c_next,
1238	.stop	= c_stop,
1239	.show	= c_show
1240};
v3.5.6
   1/*
   2 *  linux/arch/arm/kernel/setup.c
   3 *
   4 *  Copyright (C) 1995-2001 Russell King
   5 *
   6 * This program is free software; you can redistribute it and/or modify
   7 * it under the terms of the GNU General Public License version 2 as
   8 * published by the Free Software Foundation.
   9 */
  10#include <linux/export.h>
  11#include <linux/kernel.h>
  12#include <linux/stddef.h>
  13#include <linux/ioport.h>
  14#include <linux/delay.h>
  15#include <linux/utsname.h>
  16#include <linux/initrd.h>
  17#include <linux/console.h>
  18#include <linux/bootmem.h>
  19#include <linux/seq_file.h>
  20#include <linux/screen_info.h>
  21#include <linux/init.h>
  22#include <linux/kexec.h>
  23#include <linux/of_fdt.h>
  24#include <linux/root_dev.h>
  25#include <linux/cpu.h>
  26#include <linux/interrupt.h>
  27#include <linux/smp.h>
  28#include <linux/fs.h>
  29#include <linux/proc_fs.h>
  30#include <linux/memblock.h>
  31#include <linux/bug.h>
  32#include <linux/compiler.h>
  33#include <linux/sort.h>
  34
  35#include <asm/unified.h>
  36#include <asm/cp15.h>
  37#include <asm/cpu.h>
  38#include <asm/cputype.h>
  39#include <asm/elf.h>
  40#include <asm/procinfo.h>
  41#include <asm/sections.h>
  42#include <asm/setup.h>
  43#include <asm/smp_plat.h>
  44#include <asm/mach-types.h>
  45#include <asm/cacheflush.h>
  46#include <asm/cachetype.h>
  47#include <asm/tlbflush.h>
  48
  49#include <asm/prom.h>
  50#include <asm/mach/arch.h>
  51#include <asm/mach/irq.h>
  52#include <asm/mach/time.h>
  53#include <asm/system_info.h>
  54#include <asm/system_misc.h>
  55#include <asm/traps.h>
  56#include <asm/unwind.h>
  57#include <asm/memblock.h>
  58
  59#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
  60#include "compat.h"
  61#endif
  62#include "atags.h"
  63#include "tcm.h"
  64
  65#ifndef MEM_SIZE
  66#define MEM_SIZE	(16*1024*1024)
  67#endif
  68
  69#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
  70char fpe_type[8];
  71
  72static int __init fpe_setup(char *line)
  73{
  74	memcpy(fpe_type, line, 8);
  75	return 1;
  76}
  77
  78__setup("fpe=", fpe_setup);
  79#endif
  80
  81extern void paging_init(struct machine_desc *desc);
  82extern void sanity_check_meminfo(void);
  83extern void reboot_setup(char *str);
  84extern void setup_dma_zone(struct machine_desc *desc);
  85
  86unsigned int processor_id;
  87EXPORT_SYMBOL(processor_id);
  88unsigned int __machine_arch_type __read_mostly;
  89EXPORT_SYMBOL(__machine_arch_type);
  90unsigned int cacheid __read_mostly;
  91EXPORT_SYMBOL(cacheid);
  92
  93unsigned int __atags_pointer __initdata;
  94
  95unsigned int system_rev;
  96EXPORT_SYMBOL(system_rev);
  97
  98unsigned int system_serial_low;
  99EXPORT_SYMBOL(system_serial_low);
 100
 101unsigned int system_serial_high;
 102EXPORT_SYMBOL(system_serial_high);
 103
 104unsigned int elf_hwcap __read_mostly;
 105EXPORT_SYMBOL(elf_hwcap);
 106
 107
 108#ifdef MULTI_CPU
 109struct processor processor __read_mostly;
 110#endif
 111#ifdef MULTI_TLB
 112struct cpu_tlb_fns cpu_tlb __read_mostly;
 113#endif
 114#ifdef MULTI_USER
 115struct cpu_user_fns cpu_user __read_mostly;
 116#endif
 117#ifdef MULTI_CACHE
 118struct cpu_cache_fns cpu_cache __read_mostly;
 119#endif
 120#ifdef CONFIG_OUTER_CACHE
 121struct outer_cache_fns outer_cache __read_mostly;
 122EXPORT_SYMBOL(outer_cache);
 123#endif
 124
 125/*
 126 * Cached cpu_architecture() result for use by assembler code.
 127 * C code should use the cpu_architecture() function instead of accessing this
 128 * variable directly.
 129 */
 130int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
 131
 132struct stack {
 133	u32 irq[3];
 134	u32 abt[3];
 135	u32 und[3];
 136} ____cacheline_aligned;
 137
 138static struct stack stacks[NR_CPUS];
 139
 140char elf_platform[ELF_PLATFORM_SIZE];
 141EXPORT_SYMBOL(elf_platform);
 142
 143static const char *cpu_name;
 144static const char *machine_name;
 145static char __initdata cmd_line[COMMAND_LINE_SIZE];
 146struct machine_desc *machine_desc __initdata;
 147
 148static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 149static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
 150#define ENDIANNESS ((char)endian_test.l)
 151
 152DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
 153
 154/*
 155 * Standard memory resources
 156 */
 157static struct resource mem_res[] = {
 158	{
 159		.name = "Video RAM",
 160		.start = 0,
 161		.end = 0,
 162		.flags = IORESOURCE_MEM
 163	},
 164	{
 165		.name = "Kernel code",
 166		.start = 0,
 167		.end = 0,
 168		.flags = IORESOURCE_MEM
 169	},
 170	{
 171		.name = "Kernel data",
 172		.start = 0,
 173		.end = 0,
 174		.flags = IORESOURCE_MEM
 175	}
 176};
 177
 178#define video_ram   mem_res[0]
 179#define kernel_code mem_res[1]
 180#define kernel_data mem_res[2]
 181
 182static struct resource io_res[] = {
 183	{
 184		.name = "reserved",
 185		.start = 0x3bc,
 186		.end = 0x3be,
 187		.flags = IORESOURCE_IO | IORESOURCE_BUSY
 188	},
 189	{
 190		.name = "reserved",
 191		.start = 0x378,
 192		.end = 0x37f,
 193		.flags = IORESOURCE_IO | IORESOURCE_BUSY
 194	},
 195	{
 196		.name = "reserved",
 197		.start = 0x278,
 198		.end = 0x27f,
 199		.flags = IORESOURCE_IO | IORESOURCE_BUSY
 200	}
 201};
 202
 203#define lp0 io_res[0]
 204#define lp1 io_res[1]
 205#define lp2 io_res[2]
 206
 207static const char *proc_arch[] = {
 208	"undefined/unknown",
 209	"3",
 210	"4",
 211	"4T",
 212	"5",
 213	"5T",
 214	"5TE",
 215	"5TEJ",
 216	"6TEJ",
 217	"7",
 218	"?(11)",
 219	"?(12)",
 220	"?(13)",
 221	"?(14)",
 222	"?(15)",
 223	"?(16)",
 224	"?(17)",
 225};
 226
 227static int __get_cpu_architecture(void)
 228{
 229	int cpu_arch;
 230
 231	if ((read_cpuid_id() & 0x0008f000) == 0) {
 232		cpu_arch = CPU_ARCH_UNKNOWN;
 233	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
 234		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
 235	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
 236		cpu_arch = (read_cpuid_id() >> 16) & 7;
 237		if (cpu_arch)
 238			cpu_arch += CPU_ARCH_ARMv3;
 239	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
 240		unsigned int mmfr0;
 241
 242		/* Revised CPUID format. Read the Memory Model Feature
 243		 * Register 0 and check for VMSAv7 or PMSAv7 */
 244		asm("mrc	p15, 0, %0, c0, c1, 4"
 245		    : "=r" (mmfr0));
 246		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
 247		    (mmfr0 & 0x000000f0) >= 0x00000030)
 248			cpu_arch = CPU_ARCH_ARMv7;
 249		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 250			 (mmfr0 & 0x000000f0) == 0x00000020)
 251			cpu_arch = CPU_ARCH_ARMv6;
 252		else
 253			cpu_arch = CPU_ARCH_UNKNOWN;
 254	} else
 255		cpu_arch = CPU_ARCH_UNKNOWN;
 256
 257	return cpu_arch;
 258}
 259
 260int __pure cpu_architecture(void)
 261{
 262	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
 263
 264	return __cpu_architecture;
 265}
 266
 267static int cpu_has_aliasing_icache(unsigned int arch)
 268{
 269	int aliasing_icache;
 270	unsigned int id_reg, num_sets, line_size;
 271
 272	/* PIPT caches never alias. */
 273	if (icache_is_pipt())
 274		return 0;
 275
 276	/* arch specifies the register format */
 277	switch (arch) {
 278	case CPU_ARCH_ARMv7:
 279		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
 280		    : /* No output operands */
 281		    : "r" (1));
 282		isb();
 283		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
 284		    : "=r" (id_reg));
 285		line_size = 4 << ((id_reg & 0x7) + 2);
 286		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
 287		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
 288		break;
 289	case CPU_ARCH_ARMv6:
 290		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
 291		break;
 292	default:
 293		/* I-cache aliases will be handled by D-cache aliasing code */
 294		aliasing_icache = 0;
 295	}
 296
 297	return aliasing_icache;
 298}
 299
 300static void __init cacheid_init(void)
 301{
 302	unsigned int cachetype = read_cpuid_cachetype();
 303	unsigned int arch = cpu_architecture();
 304
 305	if (arch >= CPU_ARCH_ARMv6) {
 306		if ((cachetype & (7 << 29)) == 4 << 29) {
 307			/* ARMv7 register format */
 308			arch = CPU_ARCH_ARMv7;
 309			cacheid = CACHEID_VIPT_NONALIASING;
 310			switch (cachetype & (3 << 14)) {
 311			case (1 << 14):
 312				cacheid |= CACHEID_ASID_TAGGED;
 313				break;
 314			case (3 << 14):
 315				cacheid |= CACHEID_PIPT;
 316				break;
 317			}
 318		} else {
 319			arch = CPU_ARCH_ARMv6;
 320			if (cachetype & (1 << 23))
 321				cacheid = CACHEID_VIPT_ALIASING;
 322			else
 323				cacheid = CACHEID_VIPT_NONALIASING;
 324		}
 325		if (cpu_has_aliasing_icache(arch))
 326			cacheid |= CACHEID_VIPT_I_ALIASING;
 327	} else {
 328		cacheid = CACHEID_VIVT;
 329	}
 330
 331	printk("CPU: %s data cache, %s instruction cache\n",
 332		cache_is_vivt() ? "VIVT" :
 333		cache_is_vipt_aliasing() ? "VIPT aliasing" :
 334		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
 335		cache_is_vivt() ? "VIVT" :
 336		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
 337		icache_is_vipt_aliasing() ? "VIPT aliasing" :
 338		icache_is_pipt() ? "PIPT" :
 339		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
 340}
 341
 342/*
 343 * These functions re-use the assembly code in head.S, which
 344 * already provide the required functionality.
 345 */
 346extern struct proc_info_list *lookup_processor_type(unsigned int);
 347
 348void __init early_print(const char *str, ...)
 349{
 350	extern void printascii(const char *);
 351	char buf[256];
 352	va_list ap;
 353
 354	va_start(ap, str);
 355	vsnprintf(buf, sizeof(buf), str, ap);
 356	va_end(ap);
 357
 358#ifdef CONFIG_DEBUG_LL
 359	printascii(buf);
 360#endif
 361	printk("%s", buf);
 362}
 363
 364static void __init feat_v6_fixup(void)
 365{
 366	int id = read_cpuid_id();
 367
 368	if ((id & 0xff0f0000) != 0x41070000)
 369		return;
 370
 371	/*
 372	 * HWCAP_TLS is available only on 1136 r1p0 and later,
 373	 * see also kuser_get_tls_init.
 374	 */
 375	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
 376		elf_hwcap &= ~HWCAP_TLS;
 377}
 378
 379/*
 380 * cpu_init - initialise one CPU.
 381 *
 382 * cpu_init sets up the per-CPU stacks.
 383 */
 384void cpu_init(void)
 385{
 386	unsigned int cpu = smp_processor_id();
 387	struct stack *stk = &stacks[cpu];
 388
 389	if (cpu >= NR_CPUS) {
 390		printk(KERN_CRIT "CPU%u: bad primary CPU number\n", cpu);
 391		BUG();
 392	}
 393
 394	cpu_proc_init();
 395
 396	/*
 397	 * Define the placement constraint for the inline asm directive below.
 398	 * In Thumb-2, msr with an immediate value is not allowed.
 399	 */
 400#ifdef CONFIG_THUMB2_KERNEL
 401#define PLC	"r"
 402#else
 403#define PLC	"I"
 404#endif
 405
 406	/*
 407	 * setup stacks for re-entrant exception handlers
 408	 */
 409	__asm__ (
 410	"msr	cpsr_c, %1\n\t"
 411	"add	r14, %0, %2\n\t"
 412	"mov	sp, r14\n\t"
 413	"msr	cpsr_c, %3\n\t"
 414	"add	r14, %0, %4\n\t"
 415	"mov	sp, r14\n\t"
 416	"msr	cpsr_c, %5\n\t"
 417	"add	r14, %0, %6\n\t"
 418	"mov	sp, r14\n\t"
 419	"msr	cpsr_c, %7"
 420	    :
 421	    : "r" (stk),
 422	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
 423	      "I" (offsetof(struct stack, irq[0])),
 424	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
 425	      "I" (offsetof(struct stack, abt[0])),
 426	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
 427	      "I" (offsetof(struct stack, und[0])),
 428	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
 429	    : "r14");
 430}
 431
 432int __cpu_logical_map[NR_CPUS];
 433
 434void __init smp_setup_processor_id(void)
 435{
 436	int i;
 437	u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
 438
 439	cpu_logical_map(0) = cpu;
 440	for (i = 1; i < NR_CPUS; ++i)
 441		cpu_logical_map(i) = i == cpu ? 0 : i;
 442
 443	printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
 444}
 445
 446static void __init setup_processor(void)
 447{
 448	struct proc_info_list *list;
 449
 450	/*
 451	 * locate processor in the list of supported processor
 452	 * types.  The linker builds this table for us from the
 453	 * entries in arch/arm/mm/proc-*.S
 454	 */
 455	list = lookup_processor_type(read_cpuid_id());
 456	if (!list) {
 457		printk("CPU configuration botched (ID %08x), unable "
 458		       "to continue.\n", read_cpuid_id());
 459		while (1);
 460	}
 461
 462	cpu_name = list->cpu_name;
 463	__cpu_architecture = __get_cpu_architecture();
 464
 465#ifdef MULTI_CPU
 466	processor = *list->proc;
 467#endif
 468#ifdef MULTI_TLB
 469	cpu_tlb = *list->tlb;
 470#endif
 471#ifdef MULTI_USER
 472	cpu_user = *list->user;
 473#endif
 474#ifdef MULTI_CACHE
 475	cpu_cache = *list->cache;
 476#endif
 477
 478	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
 479	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
 480	       proc_arch[cpu_architecture()], cr_alignment);
 481
 482	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
 483		 list->arch_name, ENDIANNESS);
 484	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
 485		 list->elf_name, ENDIANNESS);
 486	elf_hwcap = list->elf_hwcap;
 487#ifndef CONFIG_ARM_THUMB
 488	elf_hwcap &= ~HWCAP_THUMB;
 489#endif
 490
 491	feat_v6_fixup();
 492
 493	cacheid_init();
 494	cpu_init();
 495}
 496
 497void __init dump_machine_table(void)
 498{
 499	struct machine_desc *p;
 500
 501	early_print("Available machine support:\n\nID (hex)\tNAME\n");
 502	for_each_machine_desc(p)
 503		early_print("%08x\t%s\n", p->nr, p->name);
 504
 505	early_print("\nPlease check your kernel config and/or bootloader.\n");
 506
 507	while (true)
 508		/* can't use cpu_relax() here as it may require MMU setup */;
 509}
 510
 511int __init arm_add_memory(phys_addr_t start, unsigned long size)
 512{
 513	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
 514
 515	if (meminfo.nr_banks >= NR_BANKS) {
 516		printk(KERN_CRIT "NR_BANKS too low, "
 517			"ignoring memory at 0x%08llx\n", (long long)start);
 518		return -EINVAL;
 519	}
 520
 521	/*
 522	 * Ensure that start/size are aligned to a page boundary.
 523	 * Size is appropriately rounded down, start is rounded up.
 524	 */
 525	size -= start & ~PAGE_MASK;
 526	bank->start = PAGE_ALIGN(start);
 527
 528#ifndef CONFIG_LPAE
 529	if (bank->start + size < bank->start) {
 530		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 531			"32-bit physical address space\n", (long long)start);
 532		/*
 533		 * To ensure bank->start + bank->size is representable in
 534		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
 535		 * This means we lose a page after masking.
 536		 */
 537		size = ULONG_MAX - bank->start;
 538	}
 539#endif
 540
 541	bank->size = size & PAGE_MASK;
 542
 543	/*
 544	 * Check whether this memory region has non-zero size or
 545	 * invalid node number.
 546	 */
 547	if (bank->size == 0)
 548		return -EINVAL;
 549
 550	meminfo.nr_banks++;
 551	return 0;
 552}
 553
 554/*
 555 * Pick out the memory size.  We look for mem=size@start,
 556 * where start and size are "size[KkMm]"
 557 */
 558static int __init early_mem(char *p)
 559{
 560	static int usermem __initdata = 0;
 561	unsigned long size;
 562	phys_addr_t start;
 563	char *endp;
 564
 565	/*
 566	 * If the user specifies memory size, we
 567	 * blow away any automatically generated
 568	 * size.
 569	 */
 570	if (usermem == 0) {
 571		usermem = 1;
 572		meminfo.nr_banks = 0;
 573	}
 574
 575	start = PHYS_OFFSET;
 576	size  = memparse(p, &endp);
 577	if (*endp == '@')
 578		start = memparse(endp + 1, NULL);
 579
 580	arm_add_memory(start, size);
 581
 582	return 0;
 583}
 584early_param("mem", early_mem);
 585
 586static void __init
 587setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz)
 588{
 589#ifdef CONFIG_BLK_DEV_RAM
 590	extern int rd_size, rd_image_start, rd_prompt, rd_doload;
 591
 592	rd_image_start = image_start;
 593	rd_prompt = prompt;
 594	rd_doload = doload;
 595
 596	if (rd_sz)
 597		rd_size = rd_sz;
 598#endif
 599}
 600
 601static void __init request_standard_resources(struct machine_desc *mdesc)
 602{
 603	struct memblock_region *region;
 604	struct resource *res;
 605
 606	kernel_code.start   = virt_to_phys(_text);
 607	kernel_code.end     = virt_to_phys(_etext - 1);
 608	kernel_data.start   = virt_to_phys(_sdata);
 609	kernel_data.end     = virt_to_phys(_end - 1);
 610
 611	for_each_memblock(memory, region) {
 612		res = alloc_bootmem_low(sizeof(*res));
 613		res->name  = "System RAM";
 614		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
 615		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
 616		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 617
 618		request_resource(&iomem_resource, res);
 619
 620		if (kernel_code.start >= res->start &&
 621		    kernel_code.end <= res->end)
 622			request_resource(res, &kernel_code);
 623		if (kernel_data.start >= res->start &&
 624		    kernel_data.end <= res->end)
 625			request_resource(res, &kernel_data);
 626	}
 627
 628	if (mdesc->video_start) {
 629		video_ram.start = mdesc->video_start;
 630		video_ram.end   = mdesc->video_end;
 631		request_resource(&iomem_resource, &video_ram);
 632	}
 633
 634	/*
 635	 * Some machines don't have the possibility of ever
 636	 * possessing lp0, lp1 or lp2
 637	 */
 638	if (mdesc->reserve_lp0)
 639		request_resource(&ioport_resource, &lp0);
 640	if (mdesc->reserve_lp1)
 641		request_resource(&ioport_resource, &lp1);
 642	if (mdesc->reserve_lp2)
 643		request_resource(&ioport_resource, &lp2);
 644}
 645
 646/*
 647 *  Tag parsing.
 648 *
 649 * This is the new way of passing data to the kernel at boot time.  Rather
 650 * than passing a fixed inflexible structure to the kernel, we pass a list
 651 * of variable-sized tags to the kernel.  The first tag must be a ATAG_CORE
 652 * tag for the list to be recognised (to distinguish the tagged list from
 653 * a param_struct).  The list is terminated with a zero-length tag (this tag
 654 * is not parsed in any way).
 655 */
 656static int __init parse_tag_core(const struct tag *tag)
 657{
 658	if (tag->hdr.size > 2) {
 659		if ((tag->u.core.flags & 1) == 0)
 660			root_mountflags &= ~MS_RDONLY;
 661		ROOT_DEV = old_decode_dev(tag->u.core.rootdev);
 662	}
 663	return 0;
 664}
 665
 666__tagtable(ATAG_CORE, parse_tag_core);
 667
 668static int __init parse_tag_mem32(const struct tag *tag)
 669{
 670	return arm_add_memory(tag->u.mem.start, tag->u.mem.size);
 671}
 672
 673__tagtable(ATAG_MEM, parse_tag_mem32);
 674
 675#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
 676struct screen_info screen_info = {
 677 .orig_video_lines	= 30,
 678 .orig_video_cols	= 80,
 679 .orig_video_mode	= 0,
 680 .orig_video_ega_bx	= 0,
 681 .orig_video_isVGA	= 1,
 682 .orig_video_points	= 8
 683};
 684
 685static int __init parse_tag_videotext(const struct tag *tag)
 686{
 687	screen_info.orig_x            = tag->u.videotext.x;
 688	screen_info.orig_y            = tag->u.videotext.y;
 689	screen_info.orig_video_page   = tag->u.videotext.video_page;
 690	screen_info.orig_video_mode   = tag->u.videotext.video_mode;
 691	screen_info.orig_video_cols   = tag->u.videotext.video_cols;
 692	screen_info.orig_video_ega_bx = tag->u.videotext.video_ega_bx;
 693	screen_info.orig_video_lines  = tag->u.videotext.video_lines;
 694	screen_info.orig_video_isVGA  = tag->u.videotext.video_isvga;
 695	screen_info.orig_video_points = tag->u.videotext.video_points;
 696	return 0;
 697}
 698
 699__tagtable(ATAG_VIDEOTEXT, parse_tag_videotext);
 700#endif
 701
 702static int __init parse_tag_ramdisk(const struct tag *tag)
 703{
 704	setup_ramdisk((tag->u.ramdisk.flags & 1) == 0,
 705		      (tag->u.ramdisk.flags & 2) == 0,
 706		      tag->u.ramdisk.start, tag->u.ramdisk.size);
 707	return 0;
 708}
 709
 710__tagtable(ATAG_RAMDISK, parse_tag_ramdisk);
 711
 712static int __init parse_tag_serialnr(const struct tag *tag)
 713{
 714	system_serial_low = tag->u.serialnr.low;
 715	system_serial_high = tag->u.serialnr.high;
 716	return 0;
 717}
 718
 719__tagtable(ATAG_SERIAL, parse_tag_serialnr);
 720
 721static int __init parse_tag_revision(const struct tag *tag)
 722{
 723	system_rev = tag->u.revision.rev;
 724	return 0;
 725}
 726
 727__tagtable(ATAG_REVISION, parse_tag_revision);
 728
 729static int __init parse_tag_cmdline(const struct tag *tag)
 730{
 731#if defined(CONFIG_CMDLINE_EXTEND)
 732	strlcat(default_command_line, " ", COMMAND_LINE_SIZE);
 733	strlcat(default_command_line, tag->u.cmdline.cmdline,
 734		COMMAND_LINE_SIZE);
 735#elif defined(CONFIG_CMDLINE_FORCE)
 736	pr_warning("Ignoring tag cmdline (using the default kernel command line)\n");
 737#else
 738	strlcpy(default_command_line, tag->u.cmdline.cmdline,
 739		COMMAND_LINE_SIZE);
 740#endif
 741	return 0;
 742}
 743
 744__tagtable(ATAG_CMDLINE, parse_tag_cmdline);
 745
 746/*
 747 * Scan the tag table for this tag, and call its parse function.
 748 * The tag table is built by the linker from all the __tagtable
 749 * declarations.
 750 */
 751static int __init parse_tag(const struct tag *tag)
 752{
 753	extern struct tagtable __tagtable_begin, __tagtable_end;
 754	struct tagtable *t;
 755
 756	for (t = &__tagtable_begin; t < &__tagtable_end; t++)
 757		if (tag->hdr.tag == t->tag) {
 758			t->parse(tag);
 759			break;
 760		}
 761
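	/* true if a handler was found, false for an unrecognised tag */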
 762	return t < &__tagtable_end;
 763}
 764
 765/*
 766 * Parse all tags in the list, checking both the global and architecture
 767 * specific tag tables.
 768 */
 769static void __init parse_tags(const struct tag *t)
 770{
 771	for (; t->hdr.size; t = tag_next(t))
 772		if (!parse_tag(t))
 773			printk(KERN_WARNING
 774				"Ignoring unrecognised tag 0x%08x\n",
 775				t->hdr.tag);
 776}
 777
 778/*
 779 * This holds our defaults.
 780 */
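/*
 * That is: one ATAG_CORE (read-only root, PAGE_SIZE, root device 0xff),
 * a single ATAG_MEM bank of MEM_SIZE bytes, and the ATAG_NONE terminator.
 * The bank's start address is filled in with PHYS_OFFSET by
 * setup_machine_tags() before the list is used.
 */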
 781static struct init_tags {
 782	struct tag_header hdr1;
 783	struct tag_core   core;
 784	struct tag_header hdr2;
 785	struct tag_mem32  mem;
 786	struct tag_header hdr3;
 787} init_tags __initdata = {
 788	{ tag_size(tag_core), ATAG_CORE },
 789	{ 1, PAGE_SIZE, 0xff },
 790	{ tag_size(tag_mem32), ATAG_MEM },
 791	{ MEM_SIZE },
 792	{ 0, ATAG_NONE }
 793};
 794
 795static int __init customize_machine(void)
 796{
 797	/* customizes platform devices, or adds new ones */
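	/*
	 * ->init_machine is the per-board hook provided through the
	 * MACHINE_START/DT_MACHINE_START description; running it from an
	 * arch_initcall means core infrastructure (memory, IRQs, timers)
	 * is already up when platform devices get registered.
	 */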
 798	if (machine_desc->init_machine)
 799		machine_desc->init_machine();
 800	return 0;
 801}
 802arch_initcall(customize_machine);
 803
 804static int __init init_machine_late(void)
 805{
 806	if (machine_desc->init_late)
 807		machine_desc->init_late();
 808	return 0;
 809}
 810late_initcall(init_machine_late);
 811
 812#ifdef CONFIG_KEXEC
 813static inline unsigned long long get_total_mem(void)
 814{
 815	unsigned long total;
 816
 817	total = max_low_pfn - min_low_pfn;
 818	return total << PAGE_SHIFT;
 819}
 820
 821/**
  822 * reserve_crashkernel() - reserves memory area for the crash kernel
  823 *
  824 * This function reserves the memory area given by the "crashkernel=" kernel
  825 * command line parameter. The reserved memory is used by a dump capture
  826 * kernel when the primary kernel crashes.
 827 */
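/*
 * Example with illustrative values: booting with "crashkernel=64M@32M"
 * reserves 64MB for the capture kernel starting at physical address 32MB
 * (syntax: crashkernel=<size>@<offset>).
 */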
 828static void __init reserve_crashkernel(void)
 829{
 830	unsigned long long crash_size, crash_base;
 831	unsigned long long total_mem;
 832	int ret;
 833
 834	total_mem = get_total_mem();
 835	ret = parse_crashkernel(boot_command_line, total_mem,
 836				&crash_size, &crash_base);
 837	if (ret)
 838		return;
 839
 840	ret = reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE);
 841	if (ret < 0) {
 842		printk(KERN_WARNING "crashkernel reservation failed - "
 843		       "memory is in use (0x%lx)\n", (unsigned long)crash_base);
 844		return;
 845	}
 846
 847	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
 848	       "for crashkernel (System RAM: %ldMB)\n",
 849	       (unsigned long)(crash_size >> 20),
 850	       (unsigned long)(crash_base >> 20),
 851	       (unsigned long)(total_mem >> 20));
 852
 853	crashk_res.start = crash_base;
 854	crashk_res.end = crash_base + crash_size - 1;
 855	insert_resource(&iomem_resource, &crashk_res);
 856}
 857#else
 858static inline void reserve_crashkernel(void) {}
 859#endif /* CONFIG_KEXEC */
 860
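/*
 * Neutralise all ATAG_MEM entries in the list.  Used when the machine's
 * ->fixup hook has already populated meminfo directly, so that
 * parse_tag_mem32() does not add the same memory a second time.
 */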
 861static void __init squash_mem_tags(struct tag *tag)
 862{
 863	for (; tag->hdr.size; tag = tag_next(tag))
 864		if (tag->hdr.tag == ATAG_MEM)
 865			tag->hdr.tag = ATAG_NONE;
 866}
 867
 868static struct machine_desc * __init setup_machine_tags(unsigned int nr)
 869{
 870	struct tag *tags = (struct tag *)&init_tags;
 871	struct machine_desc *mdesc = NULL, *p;
 872	char *from = default_command_line;
 873
 874	init_tags.mem.start = PHYS_OFFSET;
 875
 876	/*
 877	 * locate machine in the list of supported machines.
 878	 */
 879	for_each_machine_desc(p)
 880		if (nr == p->nr) {
 881			printk("Machine: %s\n", p->name);
 882			mdesc = p;
 883			break;
 884		}
 885
 886	if (!mdesc) {
 887		early_print("\nError: unrecognized/unsupported machine ID"
 888			" (r1 = 0x%08x).\n\n", nr);
 889		dump_machine_table(); /* does not return */
 890	}
 891
 892	if (__atags_pointer)
 893		tags = phys_to_virt(__atags_pointer);
 894	else if (mdesc->atag_offset)
 895		tags = (void *)(PAGE_OFFSET + mdesc->atag_offset);
 896
 897#if defined(CONFIG_DEPRECATED_PARAM_STRUCT)
 898	/*
 899	 * If we have the old style parameters, convert them to
 900	 * a tag list.
 901	 */
 902	if (tags->hdr.tag != ATAG_CORE)
 903		convert_to_tag_list(tags);
 904#endif
 905
 906	if (tags->hdr.tag != ATAG_CORE) {
 907#if defined(CONFIG_OF)
 908		/*
 909		 * If CONFIG_OF is set, then assume this is a reasonably
 910		 * modern system that should pass boot parameters
 911		 */
 912		early_print("Warning: Neither atags nor dtb found\n");
 913#endif
 914		tags = (struct tag *)&init_tags;
 915	}
 916
 917	if (mdesc->fixup)
 918		mdesc->fixup(tags, &from, &meminfo);
 919
 920	if (tags->hdr.tag == ATAG_CORE) {
 921		if (meminfo.nr_banks != 0)
 922			squash_mem_tags(tags);
 923		save_atags(tags);
 924		parse_tags(tags);
 925	}
 926
 927	/* parse_early_param needs a boot_command_line */
 928	strlcpy(boot_command_line, from, COMMAND_LINE_SIZE);
 929
 930	return mdesc;
 931}
 932
 933static int __init meminfo_cmp(const void *_a, const void *_b)
 934{
 935	const struct membank *a = _a, *b = _b;
 936	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
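	/* reduce the (potentially large) delta to -1/0/1 for the int return */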
 937	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
 938}
 939
 940void __init setup_arch(char **cmdline_p)
 941{
 942	struct machine_desc *mdesc;
 943
 944	setup_processor();
 945	mdesc = setup_machine_fdt(__atags_pointer);
 946	if (!mdesc)
 947		mdesc = setup_machine_tags(machine_arch_type);
 948	machine_desc = mdesc;
 949	machine_name = mdesc->name;
 950
 951	setup_dma_zone(mdesc);
 952
 953	if (mdesc->restart_mode)
 954		reboot_setup(&mdesc->restart_mode);
 955
 956	init_mm.start_code = (unsigned long) _text;
 957	init_mm.end_code   = (unsigned long) _etext;
 958	init_mm.end_data   = (unsigned long) _edata;
 959	init_mm.brk	   = (unsigned long) _end;
 960
 961	/* populate cmd_line too for later use, preserving boot_command_line */
 962	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
 963	*cmdline_p = cmd_line;
 964
 965	parse_early_param();
 966
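	/*
	 * Memory banks may have been registered in any order (boot tags,
	 * device tree, early "mem=" parameters); sort them by start address
	 * so the checks and memblock setup below see ascending banks.
	 */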
 967	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
 968	sanity_check_meminfo();
 969	arm_memblock_init(&meminfo, mdesc);
 970
 971	paging_init(mdesc);
 972	request_standard_resources(mdesc);
 973
 974	if (mdesc->restart)
 975		arm_pm_restart = mdesc->restart;
 976
 977	unflatten_device_tree();
 978
 979#ifdef CONFIG_SMP
 980	if (is_smp())
 981		smp_init_cpus();
 982#endif
 983	reserve_crashkernel();
 984
 985	tcm_init();
 986
 987#ifdef CONFIG_MULTI_IRQ_HANDLER
 988	handle_arch_irq = mdesc->handle_irq;
 989#endif
 990
 991#ifdef CONFIG_VT
 992#if defined(CONFIG_VGA_CONSOLE)
 993	conswitchp = &vga_con;
 994#elif defined(CONFIG_DUMMY_CONSOLE)
 995	conswitchp = &dummy_con;
 996#endif
 997#endif
 998
 999	if (mdesc->init_early)
1000		mdesc->init_early();
1001}
1002
1003
1004static int __init topology_init(void)
1005{
1006	int cpu;
1007
1008	for_each_possible_cpu(cpu) {
1009		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1010		cpuinfo->cpu.hotpluggable = 1;
1011		register_cpu(&cpuinfo->cpu, cpu);
1012	}
1013
1014	return 0;
1015}
1016subsys_initcall(topology_init);
1017
1018#ifdef CONFIG_HAVE_PROC_CPU
1019static int __init proc_cpu_init(void)
1020{
1021	struct proc_dir_entry *res;
1022
1023	res = proc_mkdir("cpu", NULL);
1024	if (!res)
1025		return -ENOMEM;
1026	return 0;
1027}
1028fs_initcall(proc_cpu_init);
1029#endif
1030
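/*
 * Human-readable names for the HWCAP_* bits, in bit order: entry i is
 * printed on the "Features" line by c_show() when bit i of elf_hwcap is set.
 */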
1031static const char *hwcap_str[] = {
1032	"swp",
1033	"half",
1034	"thumb",
1035	"26bit",
1036	"fastmult",
1037	"fpa",
1038	"vfp",
1039	"edsp",
1040	"java",
1041	"iwmmxt",
1042	"crunch",
1043	"thumbee",
1044	"neon",
1045	"vfpv3",
1046	"vfpv3d16",
1047	"tls",
1048	"vfpv4",
1049	"idiva",
1050	"idivt",
1051	NULL
1052};
1053
1054static int c_show(struct seq_file *m, void *v)
1055{
1056	int i;
1057
1058	seq_printf(m, "Processor\t: %s rev %d (%s)\n",
1059		   cpu_name, read_cpuid_id() & 15, elf_platform);
1060
1061#if defined(CONFIG_SMP)
1062	for_each_online_cpu(i) {
1063		/*
1064		 * glibc reads /proc/cpuinfo to determine the number of
1065		 * online processors, looking for lines beginning with
1066		 * "processor".  Give glibc what it expects.
1067		 */
1068		seq_printf(m, "processor\t: %d\n", i);
1069		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n\n",
1070			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1071			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1072	}
1073#else /* CONFIG_SMP */
1074	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1075		   loops_per_jiffy / (500000/HZ),
1076		   (loops_per_jiffy / (5000/HZ)) % 100);
1077#endif
1078
1079	/* dump out the processor features */
1080	seq_puts(m, "Features\t: ");
1081
1082	for (i = 0; hwcap_str[i]; i++)
1083		if (elf_hwcap & (1 << i))
1084			seq_printf(m, "%s ", hwcap_str[i]);
1085
1086	seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24);
1087	seq_printf(m, "CPU architecture: %s\n", proc_arch[cpu_architecture()]);
1088
1089	if ((read_cpuid_id() & 0x0008f000) == 0x00000000) {
1090		/* pre-ARM7 */
1091		seq_printf(m, "CPU part\t: %07x\n", read_cpuid_id() >> 4);
1092	} else {
1093		if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
1094			/* ARM7 */
1095			seq_printf(m, "CPU variant\t: 0x%02x\n",
1096				   (read_cpuid_id() >> 16) & 127);
1097		} else {
1098			/* post-ARM7 */
1099			seq_printf(m, "CPU variant\t: 0x%x\n",
1100				   (read_cpuid_id() >> 20) & 15);
1101		}
1102		seq_printf(m, "CPU part\t: 0x%03x\n",
1103			   (read_cpuid_id() >> 4) & 0xfff);
1104	}
1105	seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15);
1106
1107	seq_puts(m, "\n");
1108
1109	seq_printf(m, "Hardware\t: %s\n", machine_name);
1110	seq_printf(m, "Revision\t: %04x\n", system_rev);
1111	seq_printf(m, "Serial\t\t: %08x%08x\n",
1112		   system_serial_high, system_serial_low);
1113
1114	return 0;
1115}
1116
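/*
 * /proc/cpuinfo is a single "record": c_start() returns a dummy non-NULL
 * cookie for position 0 so that c_show() runs exactly once, and c_next()
 * then ends the iteration.
 */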
1117static void *c_start(struct seq_file *m, loff_t *pos)
1118{
1119	return *pos < 1 ? (void *)1 : NULL;
1120}
1121
1122static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1123{
1124	++*pos;
1125	return NULL;
1126}
1127
1128static void c_stop(struct seq_file *m, void *v)
1129{
1130}
1131
1132const struct seq_operations cpuinfo_op = {
1133	.start	= c_start,
1134	.next	= c_next,
1135	.stop	= c_stop,
1136	.show	= c_show
1137};