/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;
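/*
 * These per-mode stacks only need to be a few words deep: the vector
 * stubs (see entry-armv.S) use them merely to stash r0, lr and spsr
 * while switching to SVC mode, where the real handler then runs on
 * the kernel stack of the interrupted task.
 */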

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
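/*
 * The union above overlays the four test characters with a native word:
 * reading the low-order byte of .l yields 'l' on a little-endian CPU and
 * 'b' on a big-endian one, which is the suffix appended to the utsname
 * machine and ELF platform strings in setup_processor().
 */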

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		asm("mcr p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
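/*
 * Example for the ARMv7 branch above: a 32KB 4-way I-cache with 32-byte
 * lines has 256 sets, so line_size * num_sets (the size of one way) is
 * 8KB.  With 4KB pages that exceeds PAGE_SIZE, i.e. the cache index uses
 * virtual address bits above the page offset, and the I-cache can alias.
 */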

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
		/* fall through: ARM-mode divide implies Thumb-mode divide */
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC "r"
#else
#define PLC "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr cpsr_c, %1\n\t"
	"add r14, %0, %2\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %3\n\t"
	"add r14, %0, %4\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %5\n\t"
	"add r14, %0, %6\n\t"
	"mov sp, r14\n\t"
	"msr cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision-free
 *			  hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do
	 * not contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24-bit value space to a
	 * compact set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision-free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, e.g. MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		 mpidr_hash.shift_aff[0],
		 mpidr_hash.shift_aff[1],
		 mpidr_hash.shift_aff[2],
		 mpidr_hash.mask,
		 mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
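/*
 * Worked example: MPIDRs 0x000, 0x001, 0x100 and 0x101 (two clusters of
 * two CPUs) give mask = 0x101, fs = {0, 0, 0} and bits = {1, 1, 0}, hence
 * shift_aff = {0, 7, 14} and mpidr_hash.bits = 2: the four CPUs hash to
 * the linear indices 0, 1, 2 and 3 with no collisions.
 */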
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types. The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], cr_alignment);

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif

	erratum_a15_798181_init();

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

	if (meminfo.nr_banks >= NR_BANKS) {
		pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
			(long long)start);
		return -EINVAL;
	}

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	bank->start = aligned_start;
	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size.
	 */
	if (bank->size == 0)
		return -EINVAL;

	meminfo.nr_banks++;
	return 0;
}
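/*
 * For example, start = 0x60000800 and size = 0x100000 become
 * aligned_start = 0x60001000 and size = 0xff800; after the final page
 * masking the bank covers 0x60001000-0x60100000.
 */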

/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		meminfo.nr_banks = 0;
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
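/*
 * For example, "mem=64M@0x60000000" discards any bootloader-supplied
 * banks and registers a single 64MB bank at physical address 0x60000000,
 * while a bare "mem=64M" places the bank at PHYS_OFFSET.  Repeating mem=
 * adds further banks.
 */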

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(_etext - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines may never have lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
	.orig_video_lines = 30,
	.orig_video_cols = 80,
	.orig_video_mode = 0,
	.orig_video_ega_bx = 0,
	.orig_video_isVGA = 1,
	.orig_video_points = 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given in the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(&meminfo, mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
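/*
 * These strings are emitted in bit order on the "Features" line of
 * /proc/cpuinfo below; a Cortex-A15, for instance, typically reports
 * something like "swp half thumb fastmult vfp edsp neon vfpv3 tls
 * vfpv4 idiva idivt vfpd32 lpae".
 */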

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/arch/arm/kernel/setup.c
4 *
5 * Copyright (C) 1995-2001 Russell King
6 */
7#include <linux/efi.h>
8#include <linux/export.h>
9#include <linux/kernel.h>
10#include <linux/stddef.h>
11#include <linux/ioport.h>
12#include <linux/delay.h>
13#include <linux/utsname.h>
14#include <linux/initrd.h>
15#include <linux/console.h>
16#include <linux/seq_file.h>
17#include <linux/screen_info.h>
18#include <linux/of_platform.h>
19#include <linux/init.h>
20#include <linux/kexec.h>
21#include <linux/libfdt.h>
22#include <linux/of_fdt.h>
23#include <linux/cpu.h>
24#include <linux/interrupt.h>
25#include <linux/smp.h>
26#include <linux/proc_fs.h>
27#include <linux/memblock.h>
28#include <linux/bug.h>
29#include <linux/compiler.h>
30#include <linux/sort.h>
31#include <linux/psci.h>
32
33#include <asm/unified.h>
34#include <asm/cp15.h>
35#include <asm/cpu.h>
36#include <asm/cputype.h>
37#include <asm/efi.h>
38#include <asm/elf.h>
39#include <asm/early_ioremap.h>
40#include <asm/fixmap.h>
41#include <asm/procinfo.h>
42#include <asm/psci.h>
43#include <asm/sections.h>
44#include <asm/setup.h>
45#include <asm/smp_plat.h>
46#include <asm/mach-types.h>
47#include <asm/cacheflush.h>
48#include <asm/cachetype.h>
49#include <asm/tlbflush.h>
50#include <asm/xen/hypervisor.h>
51
52#include <asm/prom.h>
53#include <asm/mach/arch.h>
54#include <asm/mach/irq.h>
55#include <asm/mach/time.h>
56#include <asm/system_info.h>
57#include <asm/system_misc.h>
58#include <asm/traps.h>
59#include <asm/unwind.h>
60#include <asm/memblock.h>
61#include <asm/virt.h>
62#include <asm/kasan.h>
63
64#include "atags.h"
65
66
67#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
68char fpe_type[8];
69
70static int __init fpe_setup(char *line)
71{
72 memcpy(fpe_type, line, 8);
73 return 1;
74}
75
76__setup("fpe=", fpe_setup);
77#endif
78
79extern void init_default_cache_policy(unsigned long);
80extern void paging_init(const struct machine_desc *desc);
81extern void early_mm_init(const struct machine_desc *);
82extern void adjust_lowmem_bounds(void);
83extern enum reboot_mode reboot_mode;
84extern void setup_dma_zone(const struct machine_desc *desc);
85
86unsigned int processor_id;
87EXPORT_SYMBOL(processor_id);
88unsigned int __machine_arch_type __read_mostly;
89EXPORT_SYMBOL(__machine_arch_type);
90unsigned int cacheid __read_mostly;
91EXPORT_SYMBOL(cacheid);
92
93unsigned int __atags_pointer __initdata;
94
95unsigned int system_rev;
96EXPORT_SYMBOL(system_rev);
97
98const char *system_serial;
99EXPORT_SYMBOL(system_serial);
100
101unsigned int system_serial_low;
102EXPORT_SYMBOL(system_serial_low);
103
104unsigned int system_serial_high;
105EXPORT_SYMBOL(system_serial_high);
106
107unsigned int elf_hwcap __read_mostly;
108EXPORT_SYMBOL(elf_hwcap);
109
110unsigned int elf_hwcap2 __read_mostly;
111EXPORT_SYMBOL(elf_hwcap2);
112
113
114#ifdef MULTI_CPU
115struct processor processor __ro_after_init;
116#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
117struct processor *cpu_vtable[NR_CPUS] = {
118 [0] = &processor,
119};
120#endif
121#endif
122#ifdef MULTI_TLB
123struct cpu_tlb_fns cpu_tlb __ro_after_init;
124#endif
125#ifdef MULTI_USER
126struct cpu_user_fns cpu_user __ro_after_init;
127#endif
128#ifdef MULTI_CACHE
129struct cpu_cache_fns cpu_cache __ro_after_init;
130#endif
131#ifdef CONFIG_OUTER_CACHE
132struct outer_cache_fns outer_cache __ro_after_init;
133EXPORT_SYMBOL(outer_cache);
134#endif
135
136/*
137 * Cached cpu_architecture() result for use by assembler code.
138 * C code should use the cpu_architecture() function instead of accessing this
139 * variable directly.
140 */
141int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
142
143struct stack {
144 u32 irq[4];
145 u32 abt[4];
146 u32 und[4];
147 u32 fiq[4];
148} ____cacheline_aligned;
149
150#ifndef CONFIG_CPU_V7M
151static struct stack stacks[NR_CPUS];
152#endif
153
154char elf_platform[ELF_PLATFORM_SIZE];
155EXPORT_SYMBOL(elf_platform);
156
157static const char *cpu_name;
158static const char *machine_name;
159static char __initdata cmd_line[COMMAND_LINE_SIZE];
160const struct machine_desc *machine_desc __initdata;
161
162static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
163#define ENDIANNESS ((char)endian_test.l)
164
165DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
166
167/*
168 * Standard memory resources
169 */
170static struct resource mem_res[] = {
171 {
172 .name = "Video RAM",
173 .start = 0,
174 .end = 0,
175 .flags = IORESOURCE_MEM
176 },
177 {
178 .name = "Kernel code",
179 .start = 0,
180 .end = 0,
181 .flags = IORESOURCE_SYSTEM_RAM
182 },
183 {
184 .name = "Kernel data",
185 .start = 0,
186 .end = 0,
187 .flags = IORESOURCE_SYSTEM_RAM
188 }
189};
190
191#define video_ram mem_res[0]
192#define kernel_code mem_res[1]
193#define kernel_data mem_res[2]
194
195static struct resource io_res[] = {
196 {
197 .name = "reserved",
198 .start = 0x3bc,
199 .end = 0x3be,
200 .flags = IORESOURCE_IO | IORESOURCE_BUSY
201 },
202 {
203 .name = "reserved",
204 .start = 0x378,
205 .end = 0x37f,
206 .flags = IORESOURCE_IO | IORESOURCE_BUSY
207 },
208 {
209 .name = "reserved",
210 .start = 0x278,
211 .end = 0x27f,
212 .flags = IORESOURCE_IO | IORESOURCE_BUSY
213 }
214};
215
216#define lp0 io_res[0]
217#define lp1 io_res[1]
218#define lp2 io_res[2]
219
220static const char *proc_arch[] = {
221 "undefined/unknown",
222 "3",
223 "4",
224 "4T",
225 "5",
226 "5T",
227 "5TE",
228 "5TEJ",
229 "6TEJ",
230 "7",
231 "7M",
232 "?(12)",
233 "?(13)",
234 "?(14)",
235 "?(15)",
236 "?(16)",
237 "?(17)",
238};
239
240#ifdef CONFIG_CPU_V7M
241static int __get_cpu_architecture(void)
242{
243 return CPU_ARCH_ARMv7M;
244}
245#else
246static int __get_cpu_architecture(void)
247{
248 int cpu_arch;
249
250 if ((read_cpuid_id() & 0x0008f000) == 0) {
251 cpu_arch = CPU_ARCH_UNKNOWN;
252 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
253 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
254 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
255 cpu_arch = (read_cpuid_id() >> 16) & 7;
256 if (cpu_arch)
257 cpu_arch += CPU_ARCH_ARMv3;
258 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
259 /* Revised CPUID format. Read the Memory Model Feature
260 * Register 0 and check for VMSAv7 or PMSAv7 */
261 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
262 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
263 (mmfr0 & 0x000000f0) >= 0x00000030)
264 cpu_arch = CPU_ARCH_ARMv7;
265 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
266 (mmfr0 & 0x000000f0) == 0x00000020)
267 cpu_arch = CPU_ARCH_ARMv6;
268 else
269 cpu_arch = CPU_ARCH_UNKNOWN;
270 } else
271 cpu_arch = CPU_ARCH_UNKNOWN;
272
273 return cpu_arch;
274}
275#endif
276
277int __pure cpu_architecture(void)
278{
279 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
280
281 return __cpu_architecture;
282}
283
284static int cpu_has_aliasing_icache(unsigned int arch)
285{
286 int aliasing_icache;
287 unsigned int id_reg, num_sets, line_size;
288
289 /* PIPT caches never alias. */
290 if (icache_is_pipt())
291 return 0;
292
293 /* arch specifies the register format */
294 switch (arch) {
295 case CPU_ARCH_ARMv7:
296 set_csselr(CSSELR_ICACHE | CSSELR_L1);
297 isb();
298 id_reg = read_ccsidr();
299 line_size = 4 << ((id_reg & 0x7) + 2);
300 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
301 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
302 break;
303 case CPU_ARCH_ARMv6:
304 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
305 break;
306 default:
307 /* I-cache aliases will be handled by D-cache aliasing code */
308 aliasing_icache = 0;
309 }
310
311 return aliasing_icache;
312}
313
314static void __init cacheid_init(void)
315{
316 unsigned int arch = cpu_architecture();
317
318 if (arch >= CPU_ARCH_ARMv6) {
319 unsigned int cachetype = read_cpuid_cachetype();
320
321 if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
322 cacheid = 0;
323 } else if ((cachetype & (7 << 29)) == 4 << 29) {
324 /* ARMv7 register format */
325 arch = CPU_ARCH_ARMv7;
326 cacheid = CACHEID_VIPT_NONALIASING;
327 switch (cachetype & (3 << 14)) {
328 case (1 << 14):
329 cacheid |= CACHEID_ASID_TAGGED;
330 break;
331 case (3 << 14):
332 cacheid |= CACHEID_PIPT;
333 break;
334 }
335 } else {
336 arch = CPU_ARCH_ARMv6;
337 if (cachetype & (1 << 23))
338 cacheid = CACHEID_VIPT_ALIASING;
339 else
340 cacheid = CACHEID_VIPT_NONALIASING;
341 }
342 if (cpu_has_aliasing_icache(arch))
343 cacheid |= CACHEID_VIPT_I_ALIASING;
344 } else {
345 cacheid = CACHEID_VIVT;
346 }
347
348 pr_info("CPU: %s data cache, %s instruction cache\n",
349 cache_is_vivt() ? "VIVT" :
350 cache_is_vipt_aliasing() ? "VIPT aliasing" :
351 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
352 cache_is_vivt() ? "VIVT" :
353 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
354 icache_is_vipt_aliasing() ? "VIPT aliasing" :
355 icache_is_pipt() ? "PIPT" :
356 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
357}
358
359/*
360 * These functions re-use the assembly code in head.S, which
361 * already provide the required functionality.
362 */
363extern struct proc_info_list *lookup_processor_type(unsigned int);
364
365void __init early_print(const char *str, ...)
366{
367 extern void printascii(const char *);
368 char buf[256];
369 va_list ap;
370
371 va_start(ap, str);
372 vsnprintf(buf, sizeof(buf), str, ap);
373 va_end(ap);
374
375#ifdef CONFIG_DEBUG_LL
376 printascii(buf);
377#endif
378 printk("%s", buf);
379}
380
381#ifdef CONFIG_ARM_PATCH_IDIV
382
383static inline u32 __attribute_const__ sdiv_instruction(void)
384{
385 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
386 /* "sdiv r0, r0, r1" */
387 u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
388 return __opcode_to_mem_thumb32(insn);
389 }
390
391 /* "sdiv r0, r0, r1" */
392 return __opcode_to_mem_arm(0xe710f110);
393}
394
395static inline u32 __attribute_const__ udiv_instruction(void)
396{
397 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
398 /* "udiv r0, r0, r1" */
399 u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
400 return __opcode_to_mem_thumb32(insn);
401 }
402
403 /* "udiv r0, r0, r1" */
404 return __opcode_to_mem_arm(0xe730f110);
405}
406
407static inline u32 __attribute_const__ bx_lr_instruction(void)
408{
409 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
410 /* "bx lr; nop" */
411 u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
412 return __opcode_to_mem_thumb32(insn);
413 }
414
415 /* "bx lr" */
416 return __opcode_to_mem_arm(0xe12fff1e);
417}
418
419static void __init patch_aeabi_idiv(void)
420{
421 extern void __aeabi_uidiv(void);
422 extern void __aeabi_idiv(void);
423 uintptr_t fn_addr;
424 unsigned int mask;
425
426 mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
427 if (!(elf_hwcap & mask))
428 return;
429
430 pr_info("CPU: div instructions available: patching division code\n");
431
432 fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
433 asm ("" : "+g" (fn_addr));
434 ((u32 *)fn_addr)[0] = udiv_instruction();
435 ((u32 *)fn_addr)[1] = bx_lr_instruction();
436 flush_icache_range(fn_addr, fn_addr + 8);
437
438 fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
439 asm ("" : "+g" (fn_addr));
440 ((u32 *)fn_addr)[0] = sdiv_instruction();
441 ((u32 *)fn_addr)[1] = bx_lr_instruction();
442 flush_icache_range(fn_addr, fn_addr + 8);
443}
444
445#else
446static inline void patch_aeabi_idiv(void) { }
447#endif
448
449static void __init cpuid_init_hwcaps(void)
450{
451 int block;
452 u32 isar5;
453 u32 isar6;
454 u32 pfr2;
455
456 if (cpu_architecture() < CPU_ARCH_ARMv7)
457 return;
458
459 block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
460 if (block >= 2)
461 elf_hwcap |= HWCAP_IDIVA;
462 if (block >= 1)
463 elf_hwcap |= HWCAP_IDIVT;
464
465 /* LPAE implies atomic ldrd/strd instructions */
466 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
467 if (block >= 5)
468 elf_hwcap |= HWCAP_LPAE;
469
470 /* check for supported v8 Crypto instructions */
471 isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
472
473 block = cpuid_feature_extract_field(isar5, 4);
474 if (block >= 2)
475 elf_hwcap2 |= HWCAP2_PMULL;
476 if (block >= 1)
477 elf_hwcap2 |= HWCAP2_AES;
478
479 block = cpuid_feature_extract_field(isar5, 8);
480 if (block >= 1)
481 elf_hwcap2 |= HWCAP2_SHA1;
482
483 block = cpuid_feature_extract_field(isar5, 12);
484 if (block >= 1)
485 elf_hwcap2 |= HWCAP2_SHA2;
486
487 block = cpuid_feature_extract_field(isar5, 16);
488 if (block >= 1)
489 elf_hwcap2 |= HWCAP2_CRC32;
490
491 /* Check for Speculation barrier instruction */
492 isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
493 block = cpuid_feature_extract_field(isar6, 12);
494 if (block >= 1)
495 elf_hwcap2 |= HWCAP2_SB;
496
497 /* Check for Speculative Store Bypassing control */
498 pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
499 block = cpuid_feature_extract_field(pfr2, 4);
500 if (block >= 1)
501 elf_hwcap2 |= HWCAP2_SSBS;
502}
503
504static void __init elf_hwcap_fixup(void)
505{
506 unsigned id = read_cpuid_id();
507
508 /*
509 * HWCAP_TLS is available only on 1136 r1p0 and later,
510 * see also kuser_get_tls_init.
511 */
512 if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
513 ((id >> 20) & 3) == 0) {
514 elf_hwcap &= ~HWCAP_TLS;
515 return;
516 }
517
518 /* Verify if CPUID scheme is implemented */
519 if ((id & 0x000f0000) != 0x000f0000)
520 return;
521
522 /*
523 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
524 * avoid advertising SWP; it may not be atomic with
525 * multiprocessing cores.
526 */
527 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
528 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
529 cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
530 elf_hwcap &= ~HWCAP_SWP;
531}
532
533/*
534 * cpu_init - initialise one CPU.
535 *
536 * cpu_init sets up the per-CPU stacks.
537 */
538void notrace cpu_init(void)
539{
540#ifndef CONFIG_CPU_V7M
541 unsigned int cpu = smp_processor_id();
542 struct stack *stk = &stacks[cpu];
543
544 if (cpu >= NR_CPUS) {
545 pr_crit("CPU%u: bad primary CPU number\n", cpu);
546 BUG();
547 }
548
549 /*
550 * This only works on resume and secondary cores. For booting on the
551 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
552 */
553 set_my_cpu_offset(per_cpu_offset(cpu));
554
555 cpu_proc_init();
556
557 /*
558 * Define the placement constraint for the inline asm directive below.
559 * In Thumb-2, msr with an immediate value is not allowed.
560 */
561#ifdef CONFIG_THUMB2_KERNEL
562#define PLC_l "l"
563#define PLC_r "r"
564#else
565#define PLC_l "I"
566#define PLC_r "I"
567#endif
568
569 /*
570 * setup stacks for re-entrant exception handlers
571 */
572 __asm__ (
573 "msr cpsr_c, %1\n\t"
574 "add r14, %0, %2\n\t"
575 "mov sp, r14\n\t"
576 "msr cpsr_c, %3\n\t"
577 "add r14, %0, %4\n\t"
578 "mov sp, r14\n\t"
579 "msr cpsr_c, %5\n\t"
580 "add r14, %0, %6\n\t"
581 "mov sp, r14\n\t"
582 "msr cpsr_c, %7\n\t"
583 "add r14, %0, %8\n\t"
584 "mov sp, r14\n\t"
585 "msr cpsr_c, %9"
586 :
587 : "r" (stk),
588 PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
589 "I" (offsetof(struct stack, irq[0])),
590 PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
591 "I" (offsetof(struct stack, abt[0])),
592 PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
593 "I" (offsetof(struct stack, und[0])),
594 PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
595 "I" (offsetof(struct stack, fiq[0])),
596 PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
597 : "r14");
598#endif
599}
600
601u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
602
603void __init smp_setup_processor_id(void)
604{
605 int i;
606 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
607 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
608
609 cpu_logical_map(0) = cpu;
610 for (i = 1; i < nr_cpu_ids; ++i)
611 cpu_logical_map(i) = i == cpu ? 0 : i;
612
613 /*
614 * clear __my_cpu_offset on boot CPU to avoid hang caused by
615 * using percpu variable early, for example, lockdep will
616 * access percpu variable inside lock_release
617 */
618 set_my_cpu_offset(0);
619
620 pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
621}
622
623struct mpidr_hash mpidr_hash;
624#ifdef CONFIG_SMP
625/**
626 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
627 * level in order to build a linear index from an
628 * MPIDR value. Resulting algorithm is a collision
629 * free hash carried out through shifting and ORing
630 */
631static void __init smp_build_mpidr_hash(void)
632{
633 u32 i, affinity;
634 u32 fs[3], bits[3], ls, mask = 0;
635 /*
636 * Pre-scan the list of MPIDRS and filter out bits that do
637 * not contribute to affinity levels, ie they never toggle.
638 */
639 for_each_possible_cpu(i)
640 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
641 pr_debug("mask of set bits 0x%x\n", mask);
642 /*
643 * Find and stash the last and first bit set at all affinity levels to
644 * check how many bits are required to represent them.
645 */
646 for (i = 0; i < 3; i++) {
647 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
648 /*
649 * Find the MSB bit and LSB bits position
650 * to determine how many bits are required
651 * to express the affinity level.
652 */
653 ls = fls(affinity);
654 fs[i] = affinity ? ffs(affinity) - 1 : 0;
655 bits[i] = ls - fs[i];
656 }
657 /*
658 * An index can be created from the MPIDR by isolating the
659 * significant bits at each affinity level and by shifting
660 * them in order to compress the 24 bits values space to a
661 * compressed set of values. This is equivalent to hashing
662 * the MPIDR through shifting and ORing. It is a collision free
663 * hash though not minimal since some levels might contain a number
664 * of CPUs that is not an exact power of 2 and their bit
665 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
666 */
667 mpidr_hash.shift_aff[0] = fs[0];
668 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
669 mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
670 (bits[1] + bits[0]);
671 mpidr_hash.mask = mask;
672 mpidr_hash.bits = bits[2] + bits[1] + bits[0];
673 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
674 mpidr_hash.shift_aff[0],
675 mpidr_hash.shift_aff[1],
676 mpidr_hash.shift_aff[2],
677 mpidr_hash.mask,
678 mpidr_hash.bits);
679 /*
680 * 4x is an arbitrary value used to warn on a hash table much bigger
681 * than expected on most systems.
682 */
683 if (mpidr_hash_size() > 4 * num_possible_cpus())
684 pr_warn("Large number of MPIDR hash buckets detected\n");
685 sync_cache_w(&mpidr_hash);
686}
687#endif
688
689/*
690 * locate processor in the list of supported processor types. The linker
691 * builds this table for us from the entries in arch/arm/mm/proc-*.S
692 */
693struct proc_info_list *lookup_processor(u32 midr)
694{
695 struct proc_info_list *list = lookup_processor_type(midr);
696
697 if (!list) {
698 pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
699 smp_processor_id(), midr);
700 while (1)
701 /* can't use cpu_relax() here as it may require MMU setup */;
702 }
703
704 return list;
705}
706
707static void __init setup_processor(void)
708{
709 unsigned int midr = read_cpuid_id();
710 struct proc_info_list *list = lookup_processor(midr);
711
712 cpu_name = list->cpu_name;
713 __cpu_architecture = __get_cpu_architecture();
714
715 init_proc_vtable(list->proc);
716#ifdef MULTI_TLB
717 cpu_tlb = *list->tlb;
718#endif
719#ifdef MULTI_USER
720 cpu_user = *list->user;
721#endif
722#ifdef MULTI_CACHE
723 cpu_cache = *list->cache;
724#endif
725
726 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
727 list->cpu_name, midr, midr & 15,
728 proc_arch[cpu_architecture()], get_cr());
729
730 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
731 list->arch_name, ENDIANNESS);
732 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
733 list->elf_name, ENDIANNESS);
734 elf_hwcap = list->elf_hwcap;
735
736 cpuid_init_hwcaps();
737 patch_aeabi_idiv();
738
739#ifndef CONFIG_ARM_THUMB
740 elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
741#endif
742#ifdef CONFIG_MMU
743 init_default_cache_policy(list->__cpu_mm_mmu_flags);
744#endif
745 erratum_a15_798181_init();
746
747 elf_hwcap_fixup();
748
749 cacheid_init();
750 cpu_init();
751}
752
753void __init dump_machine_table(void)
754{
755 const struct machine_desc *p;
756
757 early_print("Available machine support:\n\nID (hex)\tNAME\n");
758 for_each_machine_desc(p)
759 early_print("%08x\t%s\n", p->nr, p->name);
760
761 early_print("\nPlease check your kernel config and/or bootloader.\n");
762
763 while (true)
764 /* can't use cpu_relax() here as it may require MMU setup */;
765}
766
767int __init arm_add_memory(u64 start, u64 size)
768{
769 u64 aligned_start;
770
771 /*
772 * Ensure that start/size are aligned to a page boundary.
773 * Size is rounded down, start is rounded up.
774 */
775 aligned_start = PAGE_ALIGN(start);
776 if (aligned_start > start + size)
777 size = 0;
778 else
779 size -= aligned_start - start;
780
781#ifndef CONFIG_PHYS_ADDR_T_64BIT
782 if (aligned_start > ULONG_MAX) {
783 pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
784 start);
785 return -EINVAL;
786 }
787
788 if (aligned_start + size > ULONG_MAX) {
789 pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
790 (long long)start);
791 /*
792 * To ensure bank->start + bank->size is representable in
793 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
794 * This means we lose a page after masking.
795 */
796 size = ULONG_MAX - aligned_start;
797 }
798#endif
799
800 if (aligned_start < PHYS_OFFSET) {
801 if (aligned_start + size <= PHYS_OFFSET) {
802 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
803 aligned_start, aligned_start + size);
804 return -EINVAL;
805 }
806
807 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
808 aligned_start, (u64)PHYS_OFFSET);
809
810 size -= PHYS_OFFSET - aligned_start;
811 aligned_start = PHYS_OFFSET;
812 }
813
814 start = aligned_start;
815 size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
816
817 /*
818 * Check whether this memory region has non-zero size or
819 * invalid node number.
820 */
821 if (size == 0)
822 return -EINVAL;
823
824 memblock_add(start, size);
825 return 0;
826}
827
828/*
829 * Pick out the memory size. We look for mem=size@start,
830 * where start and size are "size[KkMm]"
831 */
832
833static int __init early_mem(char *p)
834{
835 static int usermem __initdata = 0;
836 u64 size;
837 u64 start;
838 char *endp;
839
840 /*
841 * If the user specifies memory size, we
842 * blow away any automatically generated
843 * size.
844 */
845 if (usermem == 0) {
846 usermem = 1;
847 memblock_remove(memblock_start_of_DRAM(),
848 memblock_end_of_DRAM() - memblock_start_of_DRAM());
849 }
850
851 start = PHYS_OFFSET;
852 size = memparse(p, &endp);
853 if (*endp == '@')
854 start = memparse(endp + 1, NULL);
855
856 arm_add_memory(start, size);
857
858 return 0;
859}
860early_param("mem", early_mem);
861
862static void __init request_standard_resources(const struct machine_desc *mdesc)
863{
864 phys_addr_t start, end, res_end;
865 struct resource *res;
866 u64 i;
867
868 kernel_code.start = virt_to_phys(_text);
869 kernel_code.end = virt_to_phys(__init_begin - 1);
870 kernel_data.start = virt_to_phys(_sdata);
871 kernel_data.end = virt_to_phys(_end - 1);
872
873 for_each_mem_range(i, &start, &end) {
874 unsigned long boot_alias_start;
875
876 /*
877 * In memblock, end points to the first byte after the
878 * range while in resourses, end points to the last byte in
879 * the range.
880 */
881 res_end = end - 1;
882
883 /*
884 * Some systems have a special memory alias which is only
885 * used for booting. We need to advertise this region to
886 * kexec-tools so they know where bootable RAM is located.
887 */
888 boot_alias_start = phys_to_idmap(start);
889 if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
890 res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
891 if (!res)
892 panic("%s: Failed to allocate %zu bytes\n",
893 __func__, sizeof(*res));
894 res->name = "System RAM (boot alias)";
895 res->start = boot_alias_start;
896 res->end = phys_to_idmap(res_end);
897 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
898 request_resource(&iomem_resource, res);
899 }
900
901 res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
902 if (!res)
903 panic("%s: Failed to allocate %zu bytes\n", __func__,
904 sizeof(*res));
905 res->name = "System RAM";
906 res->start = start;
907 res->end = res_end;
908 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
909
910 request_resource(&iomem_resource, res);
911
912 if (kernel_code.start >= res->start &&
913 kernel_code.end <= res->end)
914 request_resource(res, &kernel_code);
915 if (kernel_data.start >= res->start &&
916 kernel_data.end <= res->end)
917 request_resource(res, &kernel_data);
918 }
919
920 if (mdesc->video_start) {
921 video_ram.start = mdesc->video_start;
922 video_ram.end = mdesc->video_end;
923 request_resource(&iomem_resource, &video_ram);
924 }
925
926 /*
927 * Some machines don't have the possibility of ever
928 * possessing lp0, lp1 or lp2
929 */
930 if (mdesc->reserve_lp0)
931 request_resource(&ioport_resource, &lp0);
932 if (mdesc->reserve_lp1)
933 request_resource(&ioport_resource, &lp1);
934 if (mdesc->reserve_lp2)
935 request_resource(&ioport_resource, &lp2);
936}
937
938#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
939 defined(CONFIG_EFI)
940struct screen_info screen_info = {
941 .orig_video_lines = 30,
942 .orig_video_cols = 80,
943 .orig_video_mode = 0,
944 .orig_video_ega_bx = 0,
945 .orig_video_isVGA = 1,
946 .orig_video_points = 8
947};
948#endif
949
950static int __init customize_machine(void)
951{
952 /*
953 * customizes platform devices, or adds new ones
954 * On DT based machines, we fall back to populating the
955 * machine from the device tree, if no callback is provided,
956 * otherwise we would always need an init_machine callback.
957 */
958 if (machine_desc->init_machine)
959 machine_desc->init_machine();
960
961 return 0;
962}
963arch_initcall(customize_machine);
964
965static int __init init_machine_late(void)
966{
967 struct device_node *root;
968 int ret;
969
970 if (machine_desc->init_late)
971 machine_desc->init_late();
972
973 root = of_find_node_by_path("/");
974 if (root) {
975 ret = of_property_read_string(root, "serial-number",
976 &system_serial);
977 if (ret)
978 system_serial = NULL;
979 }
980
981 if (!system_serial)
982 system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
983 system_serial_high,
984 system_serial_low);
985
986 return 0;
987}
988late_initcall(init_machine_late);
989
990#ifdef CONFIG_KEXEC
991/*
992 * The crash region must be aligned to 128MB to avoid
993 * zImage relocating below the reserved region.
994 */
995#define CRASH_ALIGN (128 << 20)
996
997static inline unsigned long long get_total_mem(void)
998{
999 unsigned long total;
1000
1001 total = max_low_pfn - min_low_pfn;
1002 return total << PAGE_SHIFT;
1003}
1004
1005/**
1006 * reserve_crashkernel() - reserves memory are for crash kernel
1007 *
1008 * This function reserves memory area given in "crashkernel=" kernel command
1009 * line parameter. The memory reserved is used by a dump capture kernel when
1010 * primary kernel is crashing.
1011 */
1012static void __init reserve_crashkernel(void)
1013{
1014 unsigned long long crash_size, crash_base;
1015 unsigned long long total_mem;
1016 int ret;
1017
1018 total_mem = get_total_mem();
1019 ret = parse_crashkernel(boot_command_line, total_mem,
1020 &crash_size, &crash_base);
1021 /* invalid value specified or crashkernel=0 */
1022 if (ret || !crash_size)
1023 return;
1024
	if (!crash_base) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;

		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN, crash_max);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long crash_max = crash_base + crash_size;
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
						  crash_base, crash_max);
		if (!start) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	pr_info("Reserving %luMB of memory at %luMB for crashkernel (System RAM: %luMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

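/*
 * Report which exception mode the CPUs entered the kernel in.  Having
 * all CPUs boot in HYP mode is what later allows the kernel to make
 * use of the virtualization extensions.
 */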
void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else {
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
	}
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

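/*
 * Bridge the legacy machine-descriptor restart hook onto the generic
 * restart notifier chain; 128 is the conventional priority for a
 * default restart handler.
 */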
static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

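/*
 * setup_arch() is the architecture-specific boot entry point called
 * from start_kernel().  It identifies the CPU and the machine (from
 * FDT or ATAGS), initializes the memory layout and paging, registers
 * the standard resources, and selects the SMP and restart mechanisms
 * for the platform.
 */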
void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	arm_efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
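	/*
	 * Choose the SMP operations: a machine-specific smp_init() that
	 * returns true takes precedence; otherwise prefer PSCI when the
	 * firmware provides it, falling back to the ops in the machine
	 * descriptor.
	 */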
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}
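/*
 * Register every possible CPU with the generic CPU subsystem, creating
 * the /sys/devices/system/cpu/cpuN nodes.  The hotpluggable flag
 * determines whether a CPU may later be taken offline through sysfs.
 */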
static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
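/*
 * Create the /proc/cpu directory so that other code (such as the
 * alignment fault handler, which publishes /proc/cpu/alignment) has a
 * place to hang its entries.
 */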
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

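/*
 * Human-readable names for the HWCAP/HWCAP2 bits shown in
 * /proc/cpuinfo.  The order must match the bit positions of the
 * HWCAP_* and HWCAP2_* definitions in <uapi/asm/hwcap.h>, since
 * c_show() indexes these tables by bit number.
 */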
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	"fphp",
	"asimdhp",
	"asimddp",
	"asimdfhm",
	"asimdbf16",
	"i8mm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"sb",
	"ssbs",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

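		/*
		 * Decode the remaining MIDR fields: variant (bits 23:20,
		 * or 22:16 on ARM7 parts), part number (bits 15:4) and
		 * revision (bits 3:0).  Pre-ARM7 parts used a different
		 * layout.
		 */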
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

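/*
 * /proc/cpuinfo is produced in a single pass: c_start() returns a
 * one-shot token (any non-NULL pointer) so that c_show() runs exactly
 * once, and c_next() then ends the sequence.
 */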
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};