// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
#include <asm/kasan.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[4];
	u32 abt[4];
	u32 und[4];
	u32 fiq[4];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;
static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

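	/*
	 * Pre-CPUID cores encode the architecture in MIDR[19:16] (ARM7
	 * parts flag Thumb support in MIDR[23]); the all-ones value 0xf
	 * selects the CPUID scheme, decoded from MMFR0 below.
	 */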
	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
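		/*
		 * CCSIDR.LineSize (bits [2:0]) encodes log2(words per line)
		 * minus 2, hence line bytes = 4 << (LineSize + 2); NumSets
		 * lives in bits [27:13]. The I-cache can alias once one way
		 * (sets * line size) spans more than a page.
		 */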
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

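		/*
		 * CTR[31:29] is the Format field: 0b100 denotes the ARMv7
		 * register layout, whose L1Ip field (bits [15:14]) gives the
		 * L1 I-cache policy; earlier values use the ARMv6 layout.
		 */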
		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

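	/*
	 * Clear bit 0 of the symbol address (the Thumb interworking bit)
	 * to get the real start of the routine; the empty asm hides the
	 * pointer's origin so the compiler cannot fold the stores below.
	 */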
	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;
	u32 isar6;
	u32 pfr2;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

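	/*
	 * ID_ISAR0.Divide (bits [27:24]): 1 means SDIV/UDIV in the Thumb
	 * instruction set only, 2 means in the ARM instruction set as well.
	 */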
	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;

	/* Check for Speculation barrier instruction */
	isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
	block = cpuid_feature_extract_field(isar6, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SB;

	/* Check for Speculative Store Bypassing control */
	pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
	block = cpuid_feature_extract_field(pfr2, 4);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SSBS;
}

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
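	/*
	 * ID_ISAR3.SynchPrim (bits [15:12]) > 1, or SynchPrim == 1 combined
	 * with ID_ISAR4.SynchPrim_frac (bits [23:20]) >= 3, indicates the
	 * byte/halfword/doubleword exclusives are implemented.
	 */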
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l "l"
#define PLC_r "r"
#else
#define PLC_l "I"
#define PLC_r "I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
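	/*
	 * SP is banked per processor mode: each "msr cpsr_c" below switches
	 * mode (with IRQ/FIQ masked), the following add/mov pair seeds that
	 * mode's stack pointer, and the final write returns to SVC mode.
	 */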
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

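	/*
	 * Make the booting CPU logical id 0 and hand its old id to whichever
	 * CPU held 0, keeping the map a permutation of 0..nr_cpu_ids-1.
	 */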
	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
			/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
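/* e.g. "mem=512M@0x80000000"; without "@start", start defaults to PHYS_OFFSET */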

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(__init_begin - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE)
struct screen_info vgacon_screen_info = {
	.orig_video_lines	= 30,
	.orig_video_cols	= 80,
	.orig_video_mode	= 0,
	.orig_video_ega_bx	= 0,
	.orig_video_isVGA	= 1,
	.orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_CRASH_RESERVE
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The memory reserved is used by a dump capture
 * kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base,
				NULL, NULL);
	/* invalid value specified or crashkernel=0 */
	if (ret || !crash_size)
		return;

	if (crash_base <= 0) {
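		/*
		 * No base given: auto-place the region below both the end of
		 * lowmem and the highest address the idmap alias can reach.
		 */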
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;

		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN, crash_max);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long crash_max = crash_base + crash_size;
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
						  crash_base, crash_max);
		if (!start) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_CRASH_RESERVE */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

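	/*
	 * __atags_pointer holds the physical DTB/ATAGS address passed in r2
	 * by the bootloader; FDT_VIRT_BASE() translates it to the virtual
	 * alias pre-mapped by the early boot code so it is usable here.
	 */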
	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	arm_efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	vgacon_register_screen(&vgacon_screen_info);
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

bool arch_cpu_is_hotpluggable(int num)
{
	return platform_can_hotplug_cpu(num);
}

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	"fphp",
	"asimdhp",
	"asimddp",
	"asimdfhm",
	"asimdbf16",
	"i8mm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"sb",
	"ssbs",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

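		/* BogoMIPS = loops_per_jiffy * HZ / 500000, shown with two decimals */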
#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/arch/arm/kernel/setup.c
4 *
5 * Copyright (C) 1995-2001 Russell King
6 */
7#include <linux/efi.h>
8#include <linux/export.h>
9#include <linux/kernel.h>
10#include <linux/stddef.h>
11#include <linux/ioport.h>
12#include <linux/delay.h>
13#include <linux/utsname.h>
14#include <linux/initrd.h>
15#include <linux/console.h>
16#include <linux/seq_file.h>
17#include <linux/screen_info.h>
18#include <linux/of_platform.h>
19#include <linux/init.h>
20#include <linux/kexec.h>
21#include <linux/libfdt.h>
22#include <linux/of_fdt.h>
23#include <linux/cpu.h>
24#include <linux/interrupt.h>
25#include <linux/smp.h>
26#include <linux/proc_fs.h>
27#include <linux/memblock.h>
28#include <linux/bug.h>
29#include <linux/compiler.h>
30#include <linux/sort.h>
31#include <linux/psci.h>
32
33#include <asm/unified.h>
34#include <asm/cp15.h>
35#include <asm/cpu.h>
36#include <asm/cputype.h>
37#include <asm/efi.h>
38#include <asm/elf.h>
39#include <asm/early_ioremap.h>
40#include <asm/fixmap.h>
41#include <asm/procinfo.h>
42#include <asm/psci.h>
43#include <asm/sections.h>
44#include <asm/setup.h>
45#include <asm/smp_plat.h>
46#include <asm/mach-types.h>
47#include <asm/cacheflush.h>
48#include <asm/cachetype.h>
49#include <asm/tlbflush.h>
50#include <asm/xen/hypervisor.h>
51
52#include <asm/prom.h>
53#include <asm/mach/arch.h>
54#include <asm/mach/irq.h>
55#include <asm/mach/time.h>
56#include <asm/system_info.h>
57#include <asm/system_misc.h>
58#include <asm/traps.h>
59#include <asm/unwind.h>
60#include <asm/memblock.h>
61#include <asm/virt.h>
62#include <asm/kasan.h>
63
64#include "atags.h"
65
66
67#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
68char fpe_type[8];
69
70static int __init fpe_setup(char *line)
71{
72 memcpy(fpe_type, line, 8);
73 return 1;
74}
75
76__setup("fpe=", fpe_setup);
77#endif
78
79extern void init_default_cache_policy(unsigned long);
80extern void paging_init(const struct machine_desc *desc);
81extern void early_mm_init(const struct machine_desc *);
82extern void adjust_lowmem_bounds(void);
83extern enum reboot_mode reboot_mode;
84extern void setup_dma_zone(const struct machine_desc *desc);
85
86unsigned int processor_id;
87EXPORT_SYMBOL(processor_id);
88unsigned int __machine_arch_type __read_mostly;
89EXPORT_SYMBOL(__machine_arch_type);
90unsigned int cacheid __read_mostly;
91EXPORT_SYMBOL(cacheid);
92
93unsigned int __atags_pointer __initdata;
94
95unsigned int system_rev;
96EXPORT_SYMBOL(system_rev);
97
98const char *system_serial;
99EXPORT_SYMBOL(system_serial);
100
101unsigned int system_serial_low;
102EXPORT_SYMBOL(system_serial_low);
103
104unsigned int system_serial_high;
105EXPORT_SYMBOL(system_serial_high);
106
107unsigned int elf_hwcap __read_mostly;
108EXPORT_SYMBOL(elf_hwcap);
109
110unsigned int elf_hwcap2 __read_mostly;
111EXPORT_SYMBOL(elf_hwcap2);
112
113
114#ifdef MULTI_CPU
115struct processor processor __ro_after_init;
116#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
117struct processor *cpu_vtable[NR_CPUS] = {
118 [0] = &processor,
119};
120#endif
121#endif
122#ifdef MULTI_TLB
123struct cpu_tlb_fns cpu_tlb __ro_after_init;
124#endif
125#ifdef MULTI_USER
126struct cpu_user_fns cpu_user __ro_after_init;
127#endif
128#ifdef MULTI_CACHE
129struct cpu_cache_fns cpu_cache __ro_after_init;
130#endif
131#ifdef CONFIG_OUTER_CACHE
132struct outer_cache_fns outer_cache __ro_after_init;
133EXPORT_SYMBOL(outer_cache);
134#endif
135
136/*
137 * Cached cpu_architecture() result for use by assembler code.
138 * C code should use the cpu_architecture() function instead of accessing this
139 * variable directly.
140 */
141int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
142
143struct stack {
144 u32 irq[4];
145 u32 abt[4];
146 u32 und[4];
147 u32 fiq[4];
148} ____cacheline_aligned;
149
150#ifndef CONFIG_CPU_V7M
151static struct stack stacks[NR_CPUS];
152#endif
153
154char elf_platform[ELF_PLATFORM_SIZE];
155EXPORT_SYMBOL(elf_platform);
156
157static const char *cpu_name;
158static const char *machine_name;
159static char __initdata cmd_line[COMMAND_LINE_SIZE];
160const struct machine_desc *machine_desc __initdata;
161
162static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
163#define ENDIANNESS ((char)endian_test.l)
164
165DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
166
167/*
168 * Standard memory resources
169 */
170static struct resource mem_res[] = {
171 {
172 .name = "Video RAM",
173 .start = 0,
174 .end = 0,
175 .flags = IORESOURCE_MEM
176 },
177 {
178 .name = "Kernel code",
179 .start = 0,
180 .end = 0,
181 .flags = IORESOURCE_SYSTEM_RAM
182 },
183 {
184 .name = "Kernel data",
185 .start = 0,
186 .end = 0,
187 .flags = IORESOURCE_SYSTEM_RAM
188 }
189};
190
191#define video_ram mem_res[0]
192#define kernel_code mem_res[1]
193#define kernel_data mem_res[2]
194
195static struct resource io_res[] = {
196 {
197 .name = "reserved",
198 .start = 0x3bc,
199 .end = 0x3be,
200 .flags = IORESOURCE_IO | IORESOURCE_BUSY
201 },
202 {
203 .name = "reserved",
204 .start = 0x378,
205 .end = 0x37f,
206 .flags = IORESOURCE_IO | IORESOURCE_BUSY
207 },
208 {
209 .name = "reserved",
210 .start = 0x278,
211 .end = 0x27f,
212 .flags = IORESOURCE_IO | IORESOURCE_BUSY
213 }
214};
215
216#define lp0 io_res[0]
217#define lp1 io_res[1]
218#define lp2 io_res[2]
219
220static const char *proc_arch[] = {
221 "undefined/unknown",
222 "3",
223 "4",
224 "4T",
225 "5",
226 "5T",
227 "5TE",
228 "5TEJ",
229 "6TEJ",
230 "7",
231 "7M",
232 "?(12)",
233 "?(13)",
234 "?(14)",
235 "?(15)",
236 "?(16)",
237 "?(17)",
238};
239
240#ifdef CONFIG_CPU_V7M
241static int __get_cpu_architecture(void)
242{
243 return CPU_ARCH_ARMv7M;
244}
245#else
246static int __get_cpu_architecture(void)
247{
248 int cpu_arch;
249
250 if ((read_cpuid_id() & 0x0008f000) == 0) {
251 cpu_arch = CPU_ARCH_UNKNOWN;
252 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
253 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
254 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
255 cpu_arch = (read_cpuid_id() >> 16) & 7;
256 if (cpu_arch)
257 cpu_arch += CPU_ARCH_ARMv3;
258 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
259 /* Revised CPUID format. Read the Memory Model Feature
260 * Register 0 and check for VMSAv7 or PMSAv7 */
261 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
262 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
263 (mmfr0 & 0x000000f0) >= 0x00000030)
264 cpu_arch = CPU_ARCH_ARMv7;
265 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
266 (mmfr0 & 0x000000f0) == 0x00000020)
267 cpu_arch = CPU_ARCH_ARMv6;
268 else
269 cpu_arch = CPU_ARCH_UNKNOWN;
270 } else
271 cpu_arch = CPU_ARCH_UNKNOWN;
272
273 return cpu_arch;
274}
275#endif
276
277int __pure cpu_architecture(void)
278{
279 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
280
281 return __cpu_architecture;
282}
283
284static int cpu_has_aliasing_icache(unsigned int arch)
285{
286 int aliasing_icache;
287 unsigned int id_reg, num_sets, line_size;
288
289 /* PIPT caches never alias. */
290 if (icache_is_pipt())
291 return 0;
292
293 /* arch specifies the register format */
294 switch (arch) {
295 case CPU_ARCH_ARMv7:
296 set_csselr(CSSELR_ICACHE | CSSELR_L1);
297 isb();
298 id_reg = read_ccsidr();
299 line_size = 4 << ((id_reg & 0x7) + 2);
300 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
301 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
302 break;
303 case CPU_ARCH_ARMv6:
304 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
305 break;
306 default:
307 /* I-cache aliases will be handled by D-cache aliasing code */
308 aliasing_icache = 0;
309 }
310
311 return aliasing_icache;
312}
313
314static void __init cacheid_init(void)
315{
316 unsigned int arch = cpu_architecture();
317
318 if (arch >= CPU_ARCH_ARMv6) {
319 unsigned int cachetype = read_cpuid_cachetype();
320
321 if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
322 cacheid = 0;
323 } else if ((cachetype & (7 << 29)) == 4 << 29) {
324 /* ARMv7 register format */
325 arch = CPU_ARCH_ARMv7;
326 cacheid = CACHEID_VIPT_NONALIASING;
327 switch (cachetype & (3 << 14)) {
328 case (1 << 14):
329 cacheid |= CACHEID_ASID_TAGGED;
330 break;
331 case (3 << 14):
332 cacheid |= CACHEID_PIPT;
333 break;
334 }
335 } else {
336 arch = CPU_ARCH_ARMv6;
337 if (cachetype & (1 << 23))
338 cacheid = CACHEID_VIPT_ALIASING;
339 else
340 cacheid = CACHEID_VIPT_NONALIASING;
341 }
342 if (cpu_has_aliasing_icache(arch))
343 cacheid |= CACHEID_VIPT_I_ALIASING;
344 } else {
345 cacheid = CACHEID_VIVT;
346 }
347
348 pr_info("CPU: %s data cache, %s instruction cache\n",
349 cache_is_vivt() ? "VIVT" :
350 cache_is_vipt_aliasing() ? "VIPT aliasing" :
351 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
352 cache_is_vivt() ? "VIVT" :
353 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
354 icache_is_vipt_aliasing() ? "VIPT aliasing" :
355 icache_is_pipt() ? "PIPT" :
356 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
357}
358
359/*
360 * These functions re-use the assembly code in head.S, which
361 * already provide the required functionality.
362 */
363extern struct proc_info_list *lookup_processor_type(unsigned int);
364
365void __init early_print(const char *str, ...)
366{
367 extern void printascii(const char *);
368 char buf[256];
369 va_list ap;
370
371 va_start(ap, str);
372 vsnprintf(buf, sizeof(buf), str, ap);
373 va_end(ap);
374
375#ifdef CONFIG_DEBUG_LL
376 printascii(buf);
377#endif
378 printk("%s", buf);
379}
380
381#ifdef CONFIG_ARM_PATCH_IDIV
382
383static inline u32 __attribute_const__ sdiv_instruction(void)
384{
385 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
386 /* "sdiv r0, r0, r1" */
387 u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
388 return __opcode_to_mem_thumb32(insn);
389 }
390
391 /* "sdiv r0, r0, r1" */
392 return __opcode_to_mem_arm(0xe710f110);
393}
394
395static inline u32 __attribute_const__ udiv_instruction(void)
396{
397 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
398 /* "udiv r0, r0, r1" */
399 u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
400 return __opcode_to_mem_thumb32(insn);
401 }
402
403 /* "udiv r0, r0, r1" */
404 return __opcode_to_mem_arm(0xe730f110);
405}
406
407static inline u32 __attribute_const__ bx_lr_instruction(void)
408{
409 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
410 /* "bx lr; nop" */
411 u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
412 return __opcode_to_mem_thumb32(insn);
413 }
414
415 /* "bx lr" */
416 return __opcode_to_mem_arm(0xe12fff1e);
417}
418
419static void __init patch_aeabi_idiv(void)
420{
421 extern void __aeabi_uidiv(void);
422 extern void __aeabi_idiv(void);
423 uintptr_t fn_addr;
424 unsigned int mask;
425
426 mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
427 if (!(elf_hwcap & mask))
428 return;
429
430 pr_info("CPU: div instructions available: patching division code\n");
431
432 fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
433 asm ("" : "+g" (fn_addr));
434 ((u32 *)fn_addr)[0] = udiv_instruction();
435 ((u32 *)fn_addr)[1] = bx_lr_instruction();
436 flush_icache_range(fn_addr, fn_addr + 8);
437
438 fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
439 asm ("" : "+g" (fn_addr));
440 ((u32 *)fn_addr)[0] = sdiv_instruction();
441 ((u32 *)fn_addr)[1] = bx_lr_instruction();
442 flush_icache_range(fn_addr, fn_addr + 8);
443}
444
445#else
446static inline void patch_aeabi_idiv(void) { }
447#endif
448
449static void __init cpuid_init_hwcaps(void)
450{
451 int block;
452 u32 isar5;
453 u32 isar6;
454 u32 pfr2;
455
456 if (cpu_architecture() < CPU_ARCH_ARMv7)
457 return;
458
459 block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
460 if (block >= 2)
461 elf_hwcap |= HWCAP_IDIVA;
462 if (block >= 1)
463 elf_hwcap |= HWCAP_IDIVT;
464
465 /* LPAE implies atomic ldrd/strd instructions */
466 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
467 if (block >= 5)
468 elf_hwcap |= HWCAP_LPAE;
469
470 /* check for supported v8 Crypto instructions */
471 isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
472
473 block = cpuid_feature_extract_field(isar5, 4);
474 if (block >= 2)
475 elf_hwcap2 |= HWCAP2_PMULL;
476 if (block >= 1)
477 elf_hwcap2 |= HWCAP2_AES;
478
479 block = cpuid_feature_extract_field(isar5, 8);
480 if (block >= 1)
481 elf_hwcap2 |= HWCAP2_SHA1;
482
483 block = cpuid_feature_extract_field(isar5, 12);
484 if (block >= 1)
485 elf_hwcap2 |= HWCAP2_SHA2;
486
487 block = cpuid_feature_extract_field(isar5, 16);
488 if (block >= 1)
489 elf_hwcap2 |= HWCAP2_CRC32;
490
491 /* Check for Speculation barrier instruction */
492 isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
493 block = cpuid_feature_extract_field(isar6, 12);
494 if (block >= 1)
495 elf_hwcap2 |= HWCAP2_SB;
496
497 /* Check for Speculative Store Bypassing control */
498 pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
499 block = cpuid_feature_extract_field(pfr2, 4);
500 if (block >= 1)
501 elf_hwcap2 |= HWCAP2_SSBS;
502}
503
504static void __init elf_hwcap_fixup(void)
505{
506 unsigned id = read_cpuid_id();
507
508 /*
509 * HWCAP_TLS is available only on 1136 r1p0 and later,
510 * see also kuser_get_tls_init.
511 */
512 if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
513 ((id >> 20) & 3) == 0) {
514 elf_hwcap &= ~HWCAP_TLS;
515 return;
516 }
517
518 /* Verify if CPUID scheme is implemented */
519 if ((id & 0x000f0000) != 0x000f0000)
520 return;
521
522 /*
523 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
524 * avoid advertising SWP; it may not be atomic with
525 * multiprocessing cores.
526 */
527 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
528 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
529 cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
530 elf_hwcap &= ~HWCAP_SWP;
531}
532
533/*
534 * cpu_init - initialise one CPU.
535 *
536 * cpu_init sets up the per-CPU stacks.
537 */
538void notrace cpu_init(void)
539{
540#ifndef CONFIG_CPU_V7M
541 unsigned int cpu = smp_processor_id();
542 struct stack *stk = &stacks[cpu];
543
544 if (cpu >= NR_CPUS) {
545 pr_crit("CPU%u: bad primary CPU number\n", cpu);
546 BUG();
547 }
548
549 /*
550 * This only works on resume and secondary cores. For booting on the
551 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
552 */
553 set_my_cpu_offset(per_cpu_offset(cpu));
554
555 cpu_proc_init();
556
557 /*
558 * Define the placement constraint for the inline asm directive below.
559 * In Thumb-2, msr with an immediate value is not allowed.
560 */
561#ifdef CONFIG_THUMB2_KERNEL
562#define PLC_l "l"
563#define PLC_r "r"
564#else
565#define PLC_l "I"
566#define PLC_r "I"
567#endif
568
569 /*
570 * setup stacks for re-entrant exception handlers
571 */
572 __asm__ (
573 "msr cpsr_c, %1\n\t"
574 "add r14, %0, %2\n\t"
575 "mov sp, r14\n\t"
576 "msr cpsr_c, %3\n\t"
577 "add r14, %0, %4\n\t"
578 "mov sp, r14\n\t"
579 "msr cpsr_c, %5\n\t"
580 "add r14, %0, %6\n\t"
581 "mov sp, r14\n\t"
582 "msr cpsr_c, %7\n\t"
583 "add r14, %0, %8\n\t"
584 "mov sp, r14\n\t"
585 "msr cpsr_c, %9"
586 :
587 : "r" (stk),
588 PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
589 "I" (offsetof(struct stack, irq[0])),
590 PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
591 "I" (offsetof(struct stack, abt[0])),
592 PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
593 "I" (offsetof(struct stack, und[0])),
594 PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
595 "I" (offsetof(struct stack, fiq[0])),
596 PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
597 : "r14");
598#endif
599}
600
601u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
602
603void __init smp_setup_processor_id(void)
604{
605 int i;
606 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
607 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
608
609 cpu_logical_map(0) = cpu;
610 for (i = 1; i < nr_cpu_ids; ++i)
611 cpu_logical_map(i) = i == cpu ? 0 : i;
612
613 /*
614 * clear __my_cpu_offset on boot CPU to avoid hang caused by
615 * using percpu variable early, for example, lockdep will
616 * access percpu variable inside lock_release
617 */
618 set_my_cpu_offset(0);
619
620 pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
621}
622
623struct mpidr_hash mpidr_hash;
624#ifdef CONFIG_SMP
625/**
626 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
627 * level in order to build a linear index from an
628 * MPIDR value. Resulting algorithm is a collision
629 * free hash carried out through shifting and ORing
630 */
631static void __init smp_build_mpidr_hash(void)
632{
633 u32 i, affinity;
634 u32 fs[3], bits[3], ls, mask = 0;
635 /*
636 * Pre-scan the list of MPIDRS and filter out bits that do
637 * not contribute to affinity levels, ie they never toggle.
638 */
639 for_each_possible_cpu(i)
640 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
641 pr_debug("mask of set bits 0x%x\n", mask);
642 /*
643 * Find and stash the last and first bit set at all affinity levels to
644 * check how many bits are required to represent them.
645 */
646 for (i = 0; i < 3; i++) {
647 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
648 /*
649 * Find the MSB bit and LSB bits position
650 * to determine how many bits are required
651 * to express the affinity level.
652 */
653 ls = fls(affinity);
654 fs[i] = affinity ? ffs(affinity) - 1 : 0;
655 bits[i] = ls - fs[i];
656 }
657 /*
658 * An index can be created from the MPIDR by isolating the
659 * significant bits at each affinity level and by shifting
660 * them in order to compress the 24 bits values space to a
661 * compressed set of values. This is equivalent to hashing
662 * the MPIDR through shifting and ORing. It is a collision free
663 * hash though not minimal since some levels might contain a number
664 * of CPUs that is not an exact power of 2 and their bit
665 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
666 */
667 mpidr_hash.shift_aff[0] = fs[0];
668 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
669 mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
670 (bits[1] + bits[0]);
671 mpidr_hash.mask = mask;
672 mpidr_hash.bits = bits[2] + bits[1] + bits[0];
673 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
674 mpidr_hash.shift_aff[0],
675 mpidr_hash.shift_aff[1],
676 mpidr_hash.shift_aff[2],
677 mpidr_hash.mask,
678 mpidr_hash.bits);
679 /*
680 * 4x is an arbitrary value used to warn on a hash table much bigger
681 * than expected on most systems.
682 */
683 if (mpidr_hash_size() > 4 * num_possible_cpus())
684 pr_warn("Large number of MPIDR hash buckets detected\n");
685 sync_cache_w(&mpidr_hash);
686}
687#endif
688
689/*
690 * locate processor in the list of supported processor types. The linker
691 * builds this table for us from the entries in arch/arm/mm/proc-*.S
692 */
693struct proc_info_list *lookup_processor(u32 midr)
694{
695 struct proc_info_list *list = lookup_processor_type(midr);
696
697 if (!list) {
698 pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
699 smp_processor_id(), midr);
700 while (1)
701 /* can't use cpu_relax() here as it may require MMU setup */;
702 }
703
704 return list;
705}
706
707static void __init setup_processor(void)
708{
709 unsigned int midr = read_cpuid_id();
710 struct proc_info_list *list = lookup_processor(midr);
711
712 cpu_name = list->cpu_name;
713 __cpu_architecture = __get_cpu_architecture();
714
715 init_proc_vtable(list->proc);
716#ifdef MULTI_TLB
717 cpu_tlb = *list->tlb;
718#endif
719#ifdef MULTI_USER
720 cpu_user = *list->user;
721#endif
722#ifdef MULTI_CACHE
723 cpu_cache = *list->cache;
724#endif
725
726 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
727 list->cpu_name, midr, midr & 15,
728 proc_arch[cpu_architecture()], get_cr());
729
730 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
731 list->arch_name, ENDIANNESS);
732 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
733 list->elf_name, ENDIANNESS);
734 elf_hwcap = list->elf_hwcap;
735
736 cpuid_init_hwcaps();
737 patch_aeabi_idiv();
738
739#ifndef CONFIG_ARM_THUMB
740 elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
741#endif
742#ifdef CONFIG_MMU
743 init_default_cache_policy(list->__cpu_mm_mmu_flags);
744#endif
745 erratum_a15_798181_init();
746
747 elf_hwcap_fixup();
748
749 cacheid_init();
750 cpu_init();
751}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
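	/*
	 * Illustrative (hypothetical values, 4K pages): start = 0x10000400,
	 * size = 0x100000 gives aligned_start = 0x10001000 and size =
	 * 0xff400, which the page masking further below trims to 0xff000.
	 */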
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region still has a non-zero size
	 * after the alignment above.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMmGg]".
 */

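/*
 * For example, "mem=512M@0x20000000" (an illustrative value) registers
 * 512MiB of RAM at physical address 0x20000000; with no "@start",
 * PHYS_OFFSET is assumed.
 */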
static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(__init_begin - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
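		/*
		 * E.g. (illustrative) the memblock range
		 * [0x80000000, 0x90000000) becomes the resource
		 * 0x80000000-0x8fffffff.
		 */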
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines can never have lp0, lp1 or lp2.
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
    defined(CONFIG_EFI)
struct screen_info screen_info = {
	.orig_video_lines = 30,
	.orig_video_cols = 80,
	.orig_video_mode = 0,
	.orig_video_ega_bx = 0,
	.orig_video_isVGA = 1,
	.orig_video_points = 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * Customize platform devices, or add new ones. On DT-based
	 * machines, we fall back to populating the machine from the
	 * device tree if no init_machine callback is provided;
	 * otherwise every machine would need one.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN (128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserve memory for the crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump-capture
 * kernel when the primary kernel crashes.
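 *
 * Example (illustrative values): "crashkernel=64M" lets memblock pick a
 * CRASH_ALIGN-aligned base below lowmem, while "crashkernel=64M@0x30000000"
 * requests the reservation at that fixed physical base.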
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	/* invalid value specified or crashkernel=0 */
	if (ret || !crash_size)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;

		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN, crash_max);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long crash_max = crash_base + crash_size;
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
						  crash_base, crash_max);
		if (!start) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

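/*
 * Priority 128 is the conventional value for a platform's default
 * restart handler (see the register_restart_handler() kerneldoc in
 * kernel/reboot.c).
 */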
static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print(" r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	arm_efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory.
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
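/*
 * Create the /proc/cpu parent directory; entries beneath it (such as
 * /proc/cpu/alignment from the alignment fault handler) are added
 * elsewhere.
 */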
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

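/*
 * c_show() below prints hwcap_str[N] when bit N of elf_hwcap is set
 * (and likewise hwcap2_str/elf_hwcap2), so the order of these tables
 * must match the HWCAP_*/HWCAP2_* bit definitions in asm/hwcap.h.
 */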
static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	"fphp",
	"asimdhp",
	"asimddp",
	"asimdfhm",
	"asimdbf16",
	"i8mm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"sb",
	"ssbs",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

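		/*
		 * BogoMIPS = loops_per_jiffy * HZ / 500000, printed here
		 * as an integer part and two decimals without floating
		 * point: e.g. loops_per_jiffy = 4980736 at HZ = 100
		 * (illustrative values) prints "996.14".
		 */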
#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

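		/*
		 * MIDR layout (post-ARM7 parts): [31:24] implementer,
		 * [23:20] variant, [19:16] architecture, [15:4] part
		 * number, [3:0] revision. The older encodings are
		 * special-cased below.
		 */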
		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

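/*
 * /proc/cpuinfo is produced as a single record: c_start() returns a
 * dummy non-NULL token for position 0 only, c_next() ends the
 * iteration, and c_show() walks the online CPUs itself.
 */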
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};