// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 */
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/libfdt.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>
#include <linux/psci.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/efi.h>
#include <asm/elf.h>
#include <asm/early_ioremap.h>
#include <asm/fixmap.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>
#include <asm/xen/hypervisor.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>
#include <asm/kasan.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

const char *system_serial;
EXPORT_SYMBOL(system_serial);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __ro_after_init;
#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
struct processor *cpu_vtable[NR_CPUS] = {
	[0] = &processor,
};
#endif
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __ro_after_init;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __ro_after_init;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __ro_after_init;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __ro_after_init;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
	u32 irq[4];
	u32 abt[4];
	u32 und[4];
	u32 fiq[4];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
#define ENDIANNESS ((char)endian_test.l)
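/*
 * Illustrative note (added, not from the original source): the cast
 * (char)endian_test.l keeps the least significant byte of the long's
 * value. On a little-endian CPU that byte was loaded from c[0], so
 * ENDIANNESS evaluates to 'l'; on a big-endian CPU it was loaded from
 * c[3] and evaluates to 'b'. The '?' bytes are never selected.
 */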

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
		set_csselr(CSSELR_ICACHE | CSSELR_L1);
		isb();
		id_reg = read_ccsidr();
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}
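/*
 * Worked example (hypothetical CCSIDR values, added for illustration):
 * with the LineSize field (bits [2:0]) equal to 1, line_size is
 * 4 << (1 + 2) = 32 bytes; with the NumSets field (bits [27:13]) equal
 * to 255, num_sets is 256. One cache way then spans 32 * 256 = 8192
 * bytes, which exceeds a 4 KiB page, so two virtual aliases of the same
 * physical line can index different sets and the I-cache must be
 * treated as aliasing.
 */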

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch >= CPU_ARCH_ARMv6) {
		unsigned int cachetype = read_cpuid_cachetype();

		if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
			cacheid = 0;
		} else if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provides the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

#ifdef CONFIG_ARM_PATCH_IDIV

static inline u32 __attribute_const__ sdiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "sdiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "sdiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe710f110);
}

static inline u32 __attribute_const__ udiv_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "udiv r0, r0, r1" */
		u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "udiv r0, r0, r1" */
	return __opcode_to_mem_arm(0xe730f110);
}

static inline u32 __attribute_const__ bx_lr_instruction(void)
{
	if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
		/* "bx lr; nop" */
		u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
		return __opcode_to_mem_thumb32(insn);
	}

	/* "bx lr" */
	return __opcode_to_mem_arm(0xe12fff1e);
}

static void __init patch_aeabi_idiv(void)
{
	extern void __aeabi_uidiv(void);
	extern void __aeabi_idiv(void);
	uintptr_t fn_addr;
	unsigned int mask;

	mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
	if (!(elf_hwcap & mask))
		return;

	pr_info("CPU: div instructions available: patching division code\n");

	fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = udiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);

	fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
	asm ("" : "+g" (fn_addr));
	((u32 *)fn_addr)[0] = sdiv_instruction();
	((u32 *)fn_addr)[1] = bx_lr_instruction();
	flush_icache_range(fn_addr, fn_addr + 8);
}
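/*
 * Editorial note (added): after patching, the first eight bytes of each
 * helper are overwritten so that, for example, __aeabi_uidiv effectively
 * becomes
 *
 *	udiv	r0, r0, r1
 *	bx	lr
 *
 * replacing the generic software-division entry point with the hardware
 * instruction whenever HWCAP_IDIVA/HWCAP_IDIVT was detected above.
 */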

#else
static inline void patch_aeabi_idiv(void) { }
#endif

static void __init cpuid_init_hwcaps(void)
{
	int block;
	u32 isar5;
	u32 isar6;
	u32 pfr2;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
	if (block >= 2)
		elf_hwcap |= HWCAP_IDIVA;
	if (block >= 1)
		elf_hwcap |= HWCAP_IDIVT;

	/* LPAE implies atomic ldrd/strd instructions */
	block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
	if (block >= 5)
		elf_hwcap |= HWCAP_LPAE;

	/* check for supported v8 Crypto instructions */
	isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);

	block = cpuid_feature_extract_field(isar5, 4);
	if (block >= 2)
		elf_hwcap2 |= HWCAP2_PMULL;
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_AES;

	block = cpuid_feature_extract_field(isar5, 8);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA1;

	block = cpuid_feature_extract_field(isar5, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SHA2;

	block = cpuid_feature_extract_field(isar5, 16);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_CRC32;

	/* Check for Speculation barrier instruction */
	isar6 = read_cpuid_ext(CPUID_EXT_ISAR6);
	block = cpuid_feature_extract_field(isar6, 12);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SB;

	/* Check for Speculative Store Bypassing control */
	pfr2 = read_cpuid_ext(CPUID_EXT_PFR2);
	block = cpuid_feature_extract_field(pfr2, 4);
	if (block >= 1)
		elf_hwcap2 |= HWCAP2_SSBS;
}
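/*
 * Illustrative decode (added note): cpuid_feature_extract() pulls a
 * signed 4-bit field out of an ID register, so the ISAR0 read above
 * inspects bits [27:24], the Divide field. A value of 1 advertises
 * SDIV/UDIV in Thumb state only (hence HWCAP_IDIVT), while 2 adds the
 * ARM-state encodings as well, which is why block >= 2 also sets
 * HWCAP_IDIVA.
 */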

static void __init elf_hwcap_fixup(void)
{
	unsigned id = read_cpuid_id();

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
	    ((id >> 20) & 3) == 0) {
		elf_hwcap &= ~HWCAP_TLS;
		return;
	}

	/* Verify if CPUID scheme is implemented */
	if ((id & 0x000f0000) != 0x000f0000)
		return;

	/*
	 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
	 * avoid advertising SWP; it may not be atomic with
	 * multiprocessing cores.
	 */
	if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
	    (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
	     cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
		elf_hwcap &= ~HWCAP_SWP;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC_l	"l"
#define PLC_r	"r"
#else
#define PLC_l	"I"
#define PLC_r	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7\n\t"
	"add	r14, %0, %8\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %9"
	    :
	    : "r" (stk),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
	      "I" (offsetof(struct stack, fiq[0])),
	      PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	cpu_logical_map(0) = cpu;
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}
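/*
 * Worked example (hypothetical 4-CPU system, added for illustration):
 * if the boot CPU has MPIDR affinity level 0 equal to 2, the loop above
 * produces cpu_logical_map = { 2, 1, 0, 3 }: logical CPU 0 is the boot
 * CPU (hardware id 2), and hardware id 0 takes over logical slot 2 so
 * the map remains a permutation of the hardware ids.
 */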

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif
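/*
 * Worked example (hypothetical 2x2 topology, added for illustration):
 * with MPIDRs { 0x000, 0x001, 0x100, 0x101 } the xor mask is 0x101, so
 * fs = { 0, 0, 0 }, bits = { 1, 1, 0 }, giving shift_aff[0] = 0,
 * shift_aff[1] = 8 + 0 - 1 = 7 and mpidr_hash.bits = 2. Hashing MPIDR
 * 0x101 then yields ((0x101 & 0x001) >> 0) | ((0x101 & 0x100) >> 7) =
 * 1 | 2 = 3, a collision-free index in [0, 3].
 */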

/*
 * locate processor in the list of supported processor types. The linker
 * builds this table for us from the entries in arch/arm/mm/proc-*.S
 */
struct proc_info_list *lookup_processor(u32 midr)
{
	struct proc_info_list *list = lookup_processor_type(midr);

	if (!list) {
		pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
		       smp_processor_id(), midr);
		while (1)
		/* can't use cpu_relax() here as it may require MMU setup */;
	}

	return list;
}

static void __init setup_processor(void)
{
	unsigned int midr = read_cpuid_id();
	struct proc_info_list *list = lookup_processor(midr);

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

	init_proc_vtable(list->proc);
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		list->cpu_name, midr, midr & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();
	patch_aeabi_idiv();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	elf_hwcap_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is rounded down, start is rounded up.
	 */
	aligned_start = PAGE_ALIGN(start);
	if (aligned_start > start + size)
		size = 0;
	else
		size -= aligned_start - start;

#ifndef CONFIG_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}
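/*
 * Worked example (hypothetical values, added for illustration): with
 * 4 KiB pages, arm_add_memory(0x80000400, 0x100000) rounds the start up
 * to aligned_start = 0x80001000, shrinks size by the 0xc00 bytes that
 * were skipped (size = 0xff400), and the final mask rounds it down to
 * 0xff000, so the registered region becomes 0x80001000-0x80100000.
 */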

/*
 * Pick out the memory size. We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);
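/*
 * Usage example (added note): booting with "mem=64M" caps RAM at 64 MiB
 * starting from PHYS_OFFSET, while "mem=512M@0x80000000" describes a
 * 512 MiB bank at physical address 0x80000000 (the address here is
 * purely illustrative). Sizes accept the suffixes parsed by memparse()
 * above.
 */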

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	phys_addr_t start, end, res_end;
	struct resource *res;
	u64 i;

	kernel_code.start = virt_to_phys(_text);
	kernel_code.end = virt_to_phys(__init_begin - 1);
	kernel_data.start = virt_to_phys(_sdata);
	kernel_data.end = virt_to_phys(_end - 1);

	for_each_mem_range(i, &start, &end) {
		unsigned long boot_alias_start;

		/*
		 * In memblock, end points to the first byte after the
		 * range while in resources, end points to the last byte in
		 * the range.
		 */
		res_end = end - 1;

		/*
		 * Some systems have a special memory alias which is only
		 * used for booting. We need to advertise this region to
		 * kexec-tools so they know where bootable RAM is located.
		 */
		boot_alias_start = phys_to_idmap(start);
		if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
			res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
			if (!res)
				panic("%s: Failed to allocate %zu bytes\n",
				      __func__, sizeof(*res));
			res->name = "System RAM (boot alias)";
			res->start = boot_alias_start;
			res->end = phys_to_idmap(res_end);
			res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			request_resource(&iomem_resource, res);
		}

		res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(*res));
		res->name = "System RAM";
		res->start = start;
		res->end = res_end;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE)
struct screen_info vgacon_screen_info = {
	.orig_video_lines = 30,
	.orig_video_cols = 80,
	.orig_video_mode = 0,
	.orig_video_ega_bx = 0,
	.orig_video_isVGA = 1,
	.orig_video_points = 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();

	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	struct device_node *root;
	int ret;

	if (machine_desc->init_late)
		machine_desc->init_late();

	root = of_find_node_by_path("/");
	if (root) {
		ret = of_property_read_string(root, "serial-number",
					      &system_serial);
		if (ret)
			system_serial = NULL;
	}

	if (!system_serial)
		system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
					  system_serial_high,
					  system_serial_low);

	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_CRASH_RESERVE
/*
 * The crash region must be aligned to 128MB to avoid
 * zImage relocating below the reserved region.
 */
#define CRASH_ALIGN	(128 << 20)

static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory area for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base,
				NULL, NULL);
	/* invalid value specified or crashkernel=0 */
	if (ret || !crash_size)
		return;

	if (crash_base <= 0) {
		unsigned long long crash_max = idmap_to_phys((u32)~0);
		unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
		if (crash_max > lowmem_max)
			crash_max = lowmem_max;

		crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
						       CRASH_ALIGN, crash_max);
		if (!crash_base) {
			pr_err("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long crash_max = crash_base + crash_size;
		unsigned long long start;

		start = memblock_phys_alloc_range(crash_size, SECTION_SIZE,
						  crash_base, crash_max);
		if (!start) {
			pr_err("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	/* The crashk resource must always be located in normal mem */
	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (arm_has_idmap_alias()) {
		/*
		 * If we have a special RAM alias for use at boot, we
		 * need to advertise to kexec tools where the alias is.
		 */
		static struct resource crashk_boot_res = {
			.name = "Crash kernel (boot alias)",
			.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
		};

		crashk_boot_res.start = phys_to_idmap(crash_base);
		crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
		insert_resource(&iomem_resource, &crashk_boot_res);
	}
}
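/*
 * Usage example (added note): "crashkernel=64M" lets the code above pick
 * a 128 MiB-aligned 64 MiB window in lowmem automatically, while
 * "crashkernel=64M@0x60000000" requests the reservation at a fixed
 * physical base and fails if that range is already in use. The address
 * shown is purely illustrative.
 */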
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_CRASH_RESERVE */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

static void (*__arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

static int arm_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	__arm_pm_restart(action, data);
	return NOTIFY_DONE;
}

static struct notifier_block arm_restart_nb = {
	.notifier_call = arm_restart,
	.priority = 128,
};

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc = NULL;
	void *atags_vaddr = NULL;

	if (__atags_pointer)
		atags_vaddr = FDT_VIRT_BASE(__atags_pointer);

	setup_processor();
	if (atags_vaddr) {
		mdesc = setup_machine_fdt(atags_vaddr);
		if (mdesc)
			memblock_reserve(__atags_pointer,
					 fdt_totalsize(atags_vaddr));
	}
	if (!mdesc)
		mdesc = setup_machine_tags(atags_vaddr, __machine_arch_type);
	if (!mdesc) {
		early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
		early_print("  r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
			    __atags_pointer);
		if (__atags_pointer)
			early_print("  r2[]=%*ph\n", 16, atags_vaddr);
		dump_machine_table();
	}

	machine_desc = mdesc;
	machine_name = mdesc->name;
	dump_stack_set_arch_desc("%s", mdesc->name);

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	setup_initial_init_mm(_text, _etext, _edata, _end);

	/* populate cmd_line too for later use, preserving boot_command_line */
	strscpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	early_fixmap_init();
	early_ioremap_init();

	parse_early_param();

#ifdef CONFIG_MMU
	early_mm_init(mdesc);
#endif
	setup_dma_zone(mdesc);
	xen_early_init();
	arm_efi_init();
	/*
	 * Make sure the calculation for lowmem/highmem is set appropriately
	 * before reserving/allocating any memory
	 */
	adjust_lowmem_bounds();
	arm_memblock_init(mdesc);
	/* Memory may have been removed so recalculate the bounds. */
	adjust_lowmem_bounds();

	early_ioremap_reset();

	paging_init(mdesc);
	kasan_init();
	request_standard_resources(mdesc);

	if (mdesc->restart) {
		__arm_pm_restart = mdesc->restart;
		register_restart_handler(&arm_restart_nb);
	}

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_dt_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	vgacon_register_screen(&vgacon_screen_info);
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}

bool arch_cpu_is_hotpluggable(int num)
{
	return platform_can_hotplug_cpu(num);
}

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	"fphp",
	"asimdhp",
	"asimddp",
	"asimdfhm",
	"asimdbf16",
	"i8mm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"sb",
	"ssbs",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor". Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

#if defined(CONFIG_SMP)
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
			   (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
#else
		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
			   loops_per_jiffy / (500000/HZ),
			   (loops_per_jiffy / (5000/HZ)) % 100);
#endif
		/* dump out the processor features */
		seq_puts(m, "Features\t: ");

		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);

		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);

		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %s\n", system_serial);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * linux/arch/arm/kernel/setup.c
4 *
5 * Copyright (C) 1995-2001 Russell King
6 */
7#include <linux/efi.h>
8#include <linux/export.h>
9#include <linux/kernel.h>
10#include <linux/stddef.h>
11#include <linux/ioport.h>
12#include <linux/delay.h>
13#include <linux/utsname.h>
14#include <linux/initrd.h>
15#include <linux/console.h>
16#include <linux/seq_file.h>
17#include <linux/screen_info.h>
18#include <linux/of_platform.h>
19#include <linux/init.h>
20#include <linux/kexec.h>
21#include <linux/of_fdt.h>
22#include <linux/cpu.h>
23#include <linux/interrupt.h>
24#include <linux/smp.h>
25#include <linux/proc_fs.h>
26#include <linux/memblock.h>
27#include <linux/bug.h>
28#include <linux/compiler.h>
29#include <linux/sort.h>
30#include <linux/psci.h>
31
32#include <asm/unified.h>
33#include <asm/cp15.h>
34#include <asm/cpu.h>
35#include <asm/cputype.h>
36#include <asm/efi.h>
37#include <asm/elf.h>
38#include <asm/early_ioremap.h>
39#include <asm/fixmap.h>
40#include <asm/procinfo.h>
41#include <asm/psci.h>
42#include <asm/sections.h>
43#include <asm/setup.h>
44#include <asm/smp_plat.h>
45#include <asm/mach-types.h>
46#include <asm/cacheflush.h>
47#include <asm/cachetype.h>
48#include <asm/tlbflush.h>
49#include <asm/xen/hypervisor.h>
50
51#include <asm/prom.h>
52#include <asm/mach/arch.h>
53#include <asm/mach/irq.h>
54#include <asm/mach/time.h>
55#include <asm/system_info.h>
56#include <asm/system_misc.h>
57#include <asm/traps.h>
58#include <asm/unwind.h>
59#include <asm/memblock.h>
60#include <asm/virt.h>
61
62#include "atags.h"
63
64
65#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
66char fpe_type[8];
67
68static int __init fpe_setup(char *line)
69{
70 memcpy(fpe_type, line, 8);
71 return 1;
72}
73
74__setup("fpe=", fpe_setup);
75#endif
76
77extern void init_default_cache_policy(unsigned long);
78extern void paging_init(const struct machine_desc *desc);
79extern void early_mm_init(const struct machine_desc *);
80extern void adjust_lowmem_bounds(void);
81extern enum reboot_mode reboot_mode;
82extern void setup_dma_zone(const struct machine_desc *desc);
83
84unsigned int processor_id;
85EXPORT_SYMBOL(processor_id);
86unsigned int __machine_arch_type __read_mostly;
87EXPORT_SYMBOL(__machine_arch_type);
88unsigned int cacheid __read_mostly;
89EXPORT_SYMBOL(cacheid);
90
91unsigned int __atags_pointer __initdata;
92
93unsigned int system_rev;
94EXPORT_SYMBOL(system_rev);
95
96const char *system_serial;
97EXPORT_SYMBOL(system_serial);
98
99unsigned int system_serial_low;
100EXPORT_SYMBOL(system_serial_low);
101
102unsigned int system_serial_high;
103EXPORT_SYMBOL(system_serial_high);
104
105unsigned int elf_hwcap __read_mostly;
106EXPORT_SYMBOL(elf_hwcap);
107
108unsigned int elf_hwcap2 __read_mostly;
109EXPORT_SYMBOL(elf_hwcap2);
110
111
112#ifdef MULTI_CPU
113struct processor processor __ro_after_init;
114#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR)
115struct processor *cpu_vtable[NR_CPUS] = {
116 [0] = &processor,
117};
118#endif
119#endif
120#ifdef MULTI_TLB
121struct cpu_tlb_fns cpu_tlb __ro_after_init;
122#endif
123#ifdef MULTI_USER
124struct cpu_user_fns cpu_user __ro_after_init;
125#endif
126#ifdef MULTI_CACHE
127struct cpu_cache_fns cpu_cache __ro_after_init;
128#endif
129#ifdef CONFIG_OUTER_CACHE
130struct outer_cache_fns outer_cache __ro_after_init;
131EXPORT_SYMBOL(outer_cache);
132#endif
133
134/*
135 * Cached cpu_architecture() result for use by assembler code.
136 * C code should use the cpu_architecture() function instead of accessing this
137 * variable directly.
138 */
139int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;
140
141struct stack {
142 u32 irq[3];
143 u32 abt[3];
144 u32 und[3];
145 u32 fiq[3];
146} ____cacheline_aligned;
147
148#ifndef CONFIG_CPU_V7M
149static struct stack stacks[NR_CPUS];
150#endif
151
152char elf_platform[ELF_PLATFORM_SIZE];
153EXPORT_SYMBOL(elf_platform);
154
155static const char *cpu_name;
156static const char *machine_name;
157static char __initdata cmd_line[COMMAND_LINE_SIZE];
158const struct machine_desc *machine_desc __initdata;
159
160static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
161#define ENDIANNESS ((char)endian_test.l)
162
163DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);
164
165/*
166 * Standard memory resources
167 */
168static struct resource mem_res[] = {
169 {
170 .name = "Video RAM",
171 .start = 0,
172 .end = 0,
173 .flags = IORESOURCE_MEM
174 },
175 {
176 .name = "Kernel code",
177 .start = 0,
178 .end = 0,
179 .flags = IORESOURCE_SYSTEM_RAM
180 },
181 {
182 .name = "Kernel data",
183 .start = 0,
184 .end = 0,
185 .flags = IORESOURCE_SYSTEM_RAM
186 }
187};
188
189#define video_ram mem_res[0]
190#define kernel_code mem_res[1]
191#define kernel_data mem_res[2]
192
193static struct resource io_res[] = {
194 {
195 .name = "reserved",
196 .start = 0x3bc,
197 .end = 0x3be,
198 .flags = IORESOURCE_IO | IORESOURCE_BUSY
199 },
200 {
201 .name = "reserved",
202 .start = 0x378,
203 .end = 0x37f,
204 .flags = IORESOURCE_IO | IORESOURCE_BUSY
205 },
206 {
207 .name = "reserved",
208 .start = 0x278,
209 .end = 0x27f,
210 .flags = IORESOURCE_IO | IORESOURCE_BUSY
211 }
212};
213
214#define lp0 io_res[0]
215#define lp1 io_res[1]
216#define lp2 io_res[2]
217
218static const char *proc_arch[] = {
219 "undefined/unknown",
220 "3",
221 "4",
222 "4T",
223 "5",
224 "5T",
225 "5TE",
226 "5TEJ",
227 "6TEJ",
228 "7",
229 "7M",
230 "?(12)",
231 "?(13)",
232 "?(14)",
233 "?(15)",
234 "?(16)",
235 "?(17)",
236};
237
238#ifdef CONFIG_CPU_V7M
239static int __get_cpu_architecture(void)
240{
241 return CPU_ARCH_ARMv7M;
242}
243#else
244static int __get_cpu_architecture(void)
245{
246 int cpu_arch;
247
248 if ((read_cpuid_id() & 0x0008f000) == 0) {
249 cpu_arch = CPU_ARCH_UNKNOWN;
250 } else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
251 cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
252 } else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
253 cpu_arch = (read_cpuid_id() >> 16) & 7;
254 if (cpu_arch)
255 cpu_arch += CPU_ARCH_ARMv3;
256 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
257 /* Revised CPUID format. Read the Memory Model Feature
258 * Register 0 and check for VMSAv7 or PMSAv7 */
259 unsigned int mmfr0 = read_cpuid_ext(CPUID_EXT_MMFR0);
260 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
261 (mmfr0 & 0x000000f0) >= 0x00000030)
262 cpu_arch = CPU_ARCH_ARMv7;
263 else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
264 (mmfr0 & 0x000000f0) == 0x00000020)
265 cpu_arch = CPU_ARCH_ARMv6;
266 else
267 cpu_arch = CPU_ARCH_UNKNOWN;
268 } else
269 cpu_arch = CPU_ARCH_UNKNOWN;
270
271 return cpu_arch;
272}
273#endif
274
275int __pure cpu_architecture(void)
276{
277 BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);
278
279 return __cpu_architecture;
280}
281
282static int cpu_has_aliasing_icache(unsigned int arch)
283{
284 int aliasing_icache;
285 unsigned int id_reg, num_sets, line_size;
286
287 /* PIPT caches never alias. */
288 if (icache_is_pipt())
289 return 0;
290
291 /* arch specifies the register format */
292 switch (arch) {
293 case CPU_ARCH_ARMv7:
294 set_csselr(CSSELR_ICACHE | CSSELR_L1);
295 isb();
296 id_reg = read_ccsidr();
297 line_size = 4 << ((id_reg & 0x7) + 2);
298 num_sets = ((id_reg >> 13) & 0x7fff) + 1;
299 aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
300 break;
301 case CPU_ARCH_ARMv6:
302 aliasing_icache = read_cpuid_cachetype() & (1 << 11);
303 break;
304 default:
305 /* I-cache aliases will be handled by D-cache aliasing code */
306 aliasing_icache = 0;
307 }
308
309 return aliasing_icache;
310}
311
312static void __init cacheid_init(void)
313{
314 unsigned int arch = cpu_architecture();
315
316 if (arch >= CPU_ARCH_ARMv6) {
317 unsigned int cachetype = read_cpuid_cachetype();
318
319 if ((arch == CPU_ARCH_ARMv7M) && !(cachetype & 0xf000f)) {
320 cacheid = 0;
321 } else if ((cachetype & (7 << 29)) == 4 << 29) {
322 /* ARMv7 register format */
323 arch = CPU_ARCH_ARMv7;
324 cacheid = CACHEID_VIPT_NONALIASING;
325 switch (cachetype & (3 << 14)) {
326 case (1 << 14):
327 cacheid |= CACHEID_ASID_TAGGED;
328 break;
329 case (3 << 14):
330 cacheid |= CACHEID_PIPT;
331 break;
332 }
333 } else {
334 arch = CPU_ARCH_ARMv6;
335 if (cachetype & (1 << 23))
336 cacheid = CACHEID_VIPT_ALIASING;
337 else
338 cacheid = CACHEID_VIPT_NONALIASING;
339 }
340 if (cpu_has_aliasing_icache(arch))
341 cacheid |= CACHEID_VIPT_I_ALIASING;
342 } else {
343 cacheid = CACHEID_VIVT;
344 }
345
346 pr_info("CPU: %s data cache, %s instruction cache\n",
347 cache_is_vivt() ? "VIVT" :
348 cache_is_vipt_aliasing() ? "VIPT aliasing" :
349 cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
350 cache_is_vivt() ? "VIVT" :
351 icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
352 icache_is_vipt_aliasing() ? "VIPT aliasing" :
353 icache_is_pipt() ? "PIPT" :
354 cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
355}
356
357/*
358 * These functions re-use the assembly code in head.S, which
359 * already provide the required functionality.
360 */
361extern struct proc_info_list *lookup_processor_type(unsigned int);
362
363void __init early_print(const char *str, ...)
364{
365 extern void printascii(const char *);
366 char buf[256];
367 va_list ap;
368
369 va_start(ap, str);
370 vsnprintf(buf, sizeof(buf), str, ap);
371 va_end(ap);
372
373#ifdef CONFIG_DEBUG_LL
374 printascii(buf);
375#endif
376 printk("%s", buf);
377}
378
379#ifdef CONFIG_ARM_PATCH_IDIV
380
381static inline u32 __attribute_const__ sdiv_instruction(void)
382{
383 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
384 /* "sdiv r0, r0, r1" */
385 u32 insn = __opcode_thumb32_compose(0xfb90, 0xf0f1);
386 return __opcode_to_mem_thumb32(insn);
387 }
388
389 /* "sdiv r0, r0, r1" */
390 return __opcode_to_mem_arm(0xe710f110);
391}
392
393static inline u32 __attribute_const__ udiv_instruction(void)
394{
395 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
396 /* "udiv r0, r0, r1" */
397 u32 insn = __opcode_thumb32_compose(0xfbb0, 0xf0f1);
398 return __opcode_to_mem_thumb32(insn);
399 }
400
401 /* "udiv r0, r0, r1" */
402 return __opcode_to_mem_arm(0xe730f110);
403}
404
405static inline u32 __attribute_const__ bx_lr_instruction(void)
406{
407 if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
408 /* "bx lr; nop" */
409 u32 insn = __opcode_thumb32_compose(0x4770, 0x46c0);
410 return __opcode_to_mem_thumb32(insn);
411 }
412
413 /* "bx lr" */
414 return __opcode_to_mem_arm(0xe12fff1e);
415}
416
417static void __init patch_aeabi_idiv(void)
418{
419 extern void __aeabi_uidiv(void);
420 extern void __aeabi_idiv(void);
421 uintptr_t fn_addr;
422 unsigned int mask;
423
424 mask = IS_ENABLED(CONFIG_THUMB2_KERNEL) ? HWCAP_IDIVT : HWCAP_IDIVA;
425 if (!(elf_hwcap & mask))
426 return;
427
428 pr_info("CPU: div instructions available: patching division code\n");
429
430 fn_addr = ((uintptr_t)&__aeabi_uidiv) & ~1;
431 asm ("" : "+g" (fn_addr));
432 ((u32 *)fn_addr)[0] = udiv_instruction();
433 ((u32 *)fn_addr)[1] = bx_lr_instruction();
434 flush_icache_range(fn_addr, fn_addr + 8);
435
436 fn_addr = ((uintptr_t)&__aeabi_idiv) & ~1;
437 asm ("" : "+g" (fn_addr));
438 ((u32 *)fn_addr)[0] = sdiv_instruction();
439 ((u32 *)fn_addr)[1] = bx_lr_instruction();
440 flush_icache_range(fn_addr, fn_addr + 8);
441}
442
443#else
444static inline void patch_aeabi_idiv(void) { }
445#endif
446
447static void __init cpuid_init_hwcaps(void)
448{
449 int block;
450 u32 isar5;
451
452 if (cpu_architecture() < CPU_ARCH_ARMv7)
453 return;
454
455 block = cpuid_feature_extract(CPUID_EXT_ISAR0, 24);
456 if (block >= 2)
457 elf_hwcap |= HWCAP_IDIVA;
458 if (block >= 1)
459 elf_hwcap |= HWCAP_IDIVT;
460
461 /* LPAE implies atomic ldrd/strd instructions */
462 block = cpuid_feature_extract(CPUID_EXT_MMFR0, 0);
463 if (block >= 5)
464 elf_hwcap |= HWCAP_LPAE;
465
466 /* check for supported v8 Crypto instructions */
467 isar5 = read_cpuid_ext(CPUID_EXT_ISAR5);
468
469 block = cpuid_feature_extract_field(isar5, 4);
470 if (block >= 2)
471 elf_hwcap2 |= HWCAP2_PMULL;
472 if (block >= 1)
473 elf_hwcap2 |= HWCAP2_AES;
474
475 block = cpuid_feature_extract_field(isar5, 8);
476 if (block >= 1)
477 elf_hwcap2 |= HWCAP2_SHA1;
478
479 block = cpuid_feature_extract_field(isar5, 12);
480 if (block >= 1)
481 elf_hwcap2 |= HWCAP2_SHA2;
482
483 block = cpuid_feature_extract_field(isar5, 16);
484 if (block >= 1)
485 elf_hwcap2 |= HWCAP2_CRC32;
486}
487
488static void __init elf_hwcap_fixup(void)
489{
490 unsigned id = read_cpuid_id();
491
492 /*
493 * HWCAP_TLS is available only on 1136 r1p0 and later,
494 * see also kuser_get_tls_init.
495 */
496 if (read_cpuid_part() == ARM_CPU_PART_ARM1136 &&
497 ((id >> 20) & 3) == 0) {
498 elf_hwcap &= ~HWCAP_TLS;
499 return;
500 }
501
502 /* Verify if CPUID scheme is implemented */
503 if ((id & 0x000f0000) != 0x000f0000)
504 return;
505
506 /*
507 * If the CPU supports LDREX/STREX and LDREXB/STREXB,
508 * avoid advertising SWP; it may not be atomic with
509 * multiprocessing cores.
510 */
511 if (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) > 1 ||
512 (cpuid_feature_extract(CPUID_EXT_ISAR3, 12) == 1 &&
513 cpuid_feature_extract(CPUID_EXT_ISAR4, 20) >= 3))
514 elf_hwcap &= ~HWCAP_SWP;
515}
516
517/*
518 * cpu_init - initialise one CPU.
519 *
520 * cpu_init sets up the per-CPU stacks.
521 */
522void notrace cpu_init(void)
523{
524#ifndef CONFIG_CPU_V7M
525 unsigned int cpu = smp_processor_id();
526 struct stack *stk = &stacks[cpu];
527
528 if (cpu >= NR_CPUS) {
529 pr_crit("CPU%u: bad primary CPU number\n", cpu);
530 BUG();
531 }
532
533 /*
534 * This only works on resume and secondary cores. For booting on the
535 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
536 */
537 set_my_cpu_offset(per_cpu_offset(cpu));
538
539 cpu_proc_init();
540
541 /*
542 * Define the placement constraint for the inline asm directive below.
543 * In Thumb-2, msr with an immediate value is not allowed.
544 */
545#ifdef CONFIG_THUMB2_KERNEL
546#define PLC "r"
547#else
548#define PLC "I"
549#endif
550
551 /*
552 * setup stacks for re-entrant exception handlers
553 */
554 __asm__ (
555 "msr cpsr_c, %1\n\t"
556 "add r14, %0, %2\n\t"
557 "mov sp, r14\n\t"
558 "msr cpsr_c, %3\n\t"
559 "add r14, %0, %4\n\t"
560 "mov sp, r14\n\t"
561 "msr cpsr_c, %5\n\t"
562 "add r14, %0, %6\n\t"
563 "mov sp, r14\n\t"
564 "msr cpsr_c, %7\n\t"
565 "add r14, %0, %8\n\t"
566 "mov sp, r14\n\t"
567 "msr cpsr_c, %9"
568 :
569 : "r" (stk),
570 PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
571 "I" (offsetof(struct stack, irq[0])),
572 PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
573 "I" (offsetof(struct stack, abt[0])),
574 PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
575 "I" (offsetof(struct stack, und[0])),
576 PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
577 "I" (offsetof(struct stack, fiq[0])),
578 PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
579 : "r14");
580#endif
581}
582
583u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };
584
585void __init smp_setup_processor_id(void)
586{
587 int i;
588 u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
589 u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
590
591 cpu_logical_map(0) = cpu;
592 for (i = 1; i < nr_cpu_ids; ++i)
593 cpu_logical_map(i) = i == cpu ? 0 : i;
594
595 /*
596 * clear __my_cpu_offset on boot CPU to avoid hang caused by
597 * using percpu variable early, for example, lockdep will
598 * access percpu variable inside lock_release
599 */
600 set_my_cpu_offset(0);
601
602 pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
603}
604
605struct mpidr_hash mpidr_hash;
606#ifdef CONFIG_SMP
607/**
608 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
609 * level in order to build a linear index from an
610 * MPIDR value. Resulting algorithm is a collision
611 * free hash carried out through shifting and ORing
612 */
613static void __init smp_build_mpidr_hash(void)
614{
615 u32 i, affinity;
616 u32 fs[3], bits[3], ls, mask = 0;
617 /*
618 * Pre-scan the list of MPIDRS and filter out bits that do
619 * not contribute to affinity levels, ie they never toggle.
620 */
621 for_each_possible_cpu(i)
622 mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
623 pr_debug("mask of set bits 0x%x\n", mask);
624 /*
625 * Find and stash the last and first bit set at all affinity levels to
626 * check how many bits are required to represent them.
627 */
628 for (i = 0; i < 3; i++) {
629 affinity = MPIDR_AFFINITY_LEVEL(mask, i);
630 /*
631 * Find the MSB bit and LSB bits position
632 * to determine how many bits are required
633 * to express the affinity level.
634 */
635 ls = fls(affinity);
636 fs[i] = affinity ? ffs(affinity) - 1 : 0;
637 bits[i] = ls - fs[i];
638 }
639 /*
640 * An index can be created from the MPIDR by isolating the
641 * significant bits at each affinity level and by shifting
642 * them in order to compress the 24 bits values space to a
643 * compressed set of values. This is equivalent to hashing
644 * the MPIDR through shifting and ORing. It is a collision free
645 * hash though not minimal since some levels might contain a number
646 * of CPUs that is not an exact power of 2 and their bit
647 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
648 */
649 mpidr_hash.shift_aff[0] = fs[0];
650 mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
651 mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
652 (bits[1] + bits[0]);
653 mpidr_hash.mask = mask;
654 mpidr_hash.bits = bits[2] + bits[1] + bits[0];
655 pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
656 mpidr_hash.shift_aff[0],
657 mpidr_hash.shift_aff[1],
658 mpidr_hash.shift_aff[2],
659 mpidr_hash.mask,
660 mpidr_hash.bits);
661 /*
662 * 4x is an arbitrary value used to warn on a hash table much bigger
663 * than expected on most systems.
664 */
665 if (mpidr_hash_size() > 4 * num_possible_cpus())
666 pr_warn("Large number of MPIDR hash buckets detected\n");
667 sync_cache_w(&mpidr_hash);
668}
669#endif
670
671/*
672 * locate processor in the list of supported processor types. The linker
673 * builds this table for us from the entries in arch/arm/mm/proc-*.S
674 */
675struct proc_info_list *lookup_processor(u32 midr)
676{
677 struct proc_info_list *list = lookup_processor_type(midr);
678
679 if (!list) {
680 pr_err("CPU%u: configuration botched (ID %08x), CPU halted\n",
681 smp_processor_id(), midr);
682 while (1)
683 /* can't use cpu_relax() here as it may require MMU setup */;
684 }
685
686 return list;
687}
688
689static void __init setup_processor(void)
690{
691 unsigned int midr = read_cpuid_id();
692 struct proc_info_list *list = lookup_processor(midr);
693
694 cpu_name = list->cpu_name;
695 __cpu_architecture = __get_cpu_architecture();
696
697 init_proc_vtable(list->proc);
698#ifdef MULTI_TLB
699 cpu_tlb = *list->tlb;
700#endif
701#ifdef MULTI_USER
702 cpu_user = *list->user;
703#endif
704#ifdef MULTI_CACHE
705 cpu_cache = *list->cache;
706#endif
707
708 pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
709 list->cpu_name, midr, midr & 15,
710 proc_arch[cpu_architecture()], get_cr());
711
712 snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
713 list->arch_name, ENDIANNESS);
714 snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
715 list->elf_name, ENDIANNESS);
716 elf_hwcap = list->elf_hwcap;
717
718 cpuid_init_hwcaps();
719 patch_aeabi_idiv();
720
721#ifndef CONFIG_ARM_THUMB
722 elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
723#endif
724#ifdef CONFIG_MMU
725 init_default_cache_policy(list->__cpu_mm_mmu_flags);
726#endif
727 erratum_a15_798181_init();
728
729 elf_hwcap_fixup();
730
731 cacheid_init();
732 cpu_init();
733}
734
735void __init dump_machine_table(void)
736{
737 const struct machine_desc *p;
738
739 early_print("Available machine support:\n\nID (hex)\tNAME\n");
740 for_each_machine_desc(p)
741 early_print("%08x\t%s\n", p->nr, p->name);
742
743 early_print("\nPlease check your kernel config and/or bootloader.\n");
744
745 while (true)
746 /* can't use cpu_relax() here as it may require MMU setup */;
747}
748
749int __init arm_add_memory(u64 start, u64 size)
750{
751 u64 aligned_start;
752
753 /*
754 * Ensure that start/size are aligned to a page boundary.
755 * Size is rounded down, start is rounded up.
756 */
757 aligned_start = PAGE_ALIGN(start);
758 if (aligned_start > start + size)
759 size = 0;
760 else
761 size -= aligned_start - start;
762
763#ifndef CONFIG_PHYS_ADDR_T_64BIT
764 if (aligned_start > ULONG_MAX) {
765 pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
766 (long long)start);
767 return -EINVAL;
768 }
769
770 if (aligned_start + size > ULONG_MAX) {
771 pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
772 (long long)start);
773 /*
774 * To ensure bank->start + bank->size is representable in
775 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
776 * This means we lose a page after masking.
777 */
778 size = ULONG_MAX - aligned_start;
779 }
780#endif
781
782 if (aligned_start < PHYS_OFFSET) {
783 if (aligned_start + size <= PHYS_OFFSET) {
784 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
785 aligned_start, aligned_start + size);
786 return -EINVAL;
787 }
788
789 pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
790 aligned_start, (u64)PHYS_OFFSET);
791
792 size -= PHYS_OFFSET - aligned_start;
793 aligned_start = PHYS_OFFSET;
794 }
795
796 start = aligned_start;
797 size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
798
799 /*
800 * Reject this memory region if the alignment above has
801 * reduced its size to zero.
802 */
803 if (size == 0)
804 return -EINVAL;
805
806 memblock_add(start, size);
807 return 0;
808}
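/*
 * Worked example of the alignment above, assuming 4KiB pages: a bank
 * passed in as start = 0x80000800, size = 0x100000 has its start
 * rounded up to 0x80001000, its size first reduced by the 0x800 that
 * was skipped and then rounded down to a whole number of pages, so
 * memblock ends up with [0x80001000, 0x80100000), i.e. 0xff000 bytes.
 */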
809
810/*
811 * Pick out the memory size. We look for mem=size[@start], where
812 * size and start accept memparse suffixes such as K, M and G.
813 */
814
815static int __init early_mem(char *p)
816{
817 static int usermem __initdata = 0;
818 u64 size;
819 u64 start;
820 char *endp;
821
822 /*
823 * If the user specifies memory size, we
824 * blow away any automatically generated
825 * size.
826 */
827 if (usermem == 0) {
828 usermem = 1;
829 memblock_remove(memblock_start_of_DRAM(),
830 memblock_end_of_DRAM() - memblock_start_of_DRAM());
831 }
832
833 start = PHYS_OFFSET;
834 size = memparse(p, &endp);
835 if (*endp == '@')
836 start = memparse(endp + 1, NULL);
837
838 arm_add_memory(start, size);
839
840 return 0;
841}
842early_param("mem", early_mem);
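/*
 * Example: booting with "mem=512M@0x80000000 mem=256M@0xa0000000" on
 * the command line discards the firmware-provided memory map (the
 * first "mem=" only, via the usermem latch above) and registers the
 * two banks explicitly; a bare "mem=64M" places the bank at
 * PHYS_OFFSET.
 */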
843
844static void __init request_standard_resources(const struct machine_desc *mdesc)
845{
846 struct memblock_region *region;
847 struct resource *res;
848
849 kernel_code.start = virt_to_phys(_text);
850 kernel_code.end = virt_to_phys(__init_begin - 1);
851 kernel_data.start = virt_to_phys(_sdata);
852 kernel_data.end = virt_to_phys(_end - 1);
853
854 for_each_memblock(memory, region) {
855 phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
856 phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
857 unsigned long boot_alias_start;
858
859 /*
860 * Some systems have a special memory alias which is only
861 * used for booting. We need to advertise this region to
862 * kexec-tools so they know where bootable RAM is located.
863 */
864 boot_alias_start = phys_to_idmap(start);
865 if (arm_has_idmap_alias() && boot_alias_start != IDMAP_INVALID_ADDR) {
866 res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
867 if (!res)
868 panic("%s: Failed to allocate %zu bytes\n",
869 __func__, sizeof(*res));
870 res->name = "System RAM (boot alias)";
871 res->start = boot_alias_start;
872 res->end = phys_to_idmap(end);
873 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
874 request_resource(&iomem_resource, res);
875 }
876
877 res = memblock_alloc(sizeof(*res), SMP_CACHE_BYTES);
878 if (!res)
879 panic("%s: Failed to allocate %zu bytes\n", __func__,
880 sizeof(*res));
881 res->name = "System RAM";
882 res->start = start;
883 res->end = end;
884 res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
885
886 request_resource(&iomem_resource, res);
887
888 if (kernel_code.start >= res->start &&
889 kernel_code.end <= res->end)
890 request_resource(res, &kernel_code);
891 if (kernel_data.start >= res->start &&
892 kernel_data.end <= res->end)
893 request_resource(res, &kernel_data);
894 }
895
896 if (mdesc->video_start) {
897 video_ram.start = mdesc->video_start;
898 video_ram.end = mdesc->video_end;
899 request_resource(&iomem_resource, &video_ram);
900 }
901
902 /*
903 * Some machines can never have lp0, lp1 or lp2 ports, so only
904 * claim them when the machine descriptor asks for them.
905 */
906 if (mdesc->reserve_lp0)
907 request_resource(&ioport_resource, &lp0);
908 if (mdesc->reserve_lp1)
909 request_resource(&ioport_resource, &lp1);
910 if (mdesc->reserve_lp2)
911 request_resource(&ioport_resource, &lp2);
912}
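/*
 * The resource tree built above surfaces in /proc/iomem roughly as
 * (addresses illustrative):
 *
 *	80000000-bfffffff : System RAM
 *	  80008000-80afffff : Kernel code
 *	  80c00000-80ffffff : Kernel data
 */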
913
914#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) || \
915 defined(CONFIG_EFI)
916struct screen_info screen_info = {
917 .orig_video_lines = 30,
918 .orig_video_cols = 80,
919 .orig_video_mode = 0,
920 .orig_video_ega_bx = 0,
921 .orig_video_isVGA = 1,
922 .orig_video_points = 8
923};
924#endif
925
926static int __init customize_machine(void)
927{
928 /*
929 * Customize platform devices, or add new ones.
930 * On DT-based machines we fall back to populating the machine
931 * from the device tree when no callback is provided; otherwise
932 * every machine would need an init_machine callback.
933 */
934 if (machine_desc->init_machine)
935 machine_desc->init_machine();
936
937 return 0;
938}
939arch_initcall(customize_machine);
940
941static int __init init_machine_late(void)
942{
943 struct device_node *root;
944 int ret;
945
946 if (machine_desc->init_late)
947 machine_desc->init_late();
948
949 root = of_find_node_by_path("/");
950 if (root) {
951 ret = of_property_read_string(root, "serial-number",
952 &system_serial);
953 if (ret)
954 system_serial = NULL;
955 }
956
957 if (!system_serial)
958 system_serial = kasprintf(GFP_KERNEL, "%08x%08x",
959 system_serial_high,
960 system_serial_low);
961
962 return 0;
963}
964late_initcall(init_machine_late);
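/*
 * The DT lookup above picks up a root-level property such as
 * (value illustrative):
 *
 *	/ {
 *		serial-number = "0123456789abcdef";
 *	};
 *
 * Only when it is absent do we fall back to the ATAG-provided
 * system_serial_high/low pair, formatted as 16 hex digits.
 */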
965
966#ifdef CONFIG_KEXEC
967/*
968 * The crash region must be aligned to 128MB to avoid
969 * zImage relocating below the reserved region.
970 */
971#define CRASH_ALIGN (128 << 20)
972
973static inline unsigned long long get_total_mem(void)
974{
975 unsigned long total;
976
977 total = max_low_pfn - min_low_pfn;
978 return total << PAGE_SHIFT;
979}
980
981/**
982 * reserve_crashkernel() - reserves memory area for crash kernel
983 *
984 * This function reserves the memory area given in the "crashkernel=" kernel
985 * command line parameter. The reserved memory is used by a dump capture
986 * kernel when the primary kernel crashes.
987 */
988static void __init reserve_crashkernel(void)
989{
990 unsigned long long crash_size, crash_base;
991 unsigned long long total_mem;
992 int ret;
993
994 total_mem = get_total_mem();
995 ret = parse_crashkernel(boot_command_line, total_mem,
996 &crash_size, &crash_base);
997 if (ret)
998 return;
999
1000 if (!crash_base) {
1001 unsigned long long crash_max = idmap_to_phys((u32)~0);
1002 unsigned long long lowmem_max = __pa(high_memory - 1) + 1;
1003 if (crash_max > lowmem_max)
1004 crash_max = lowmem_max;
1005 crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
1006 crash_size, CRASH_ALIGN);
1007 if (!crash_base) {
1008 pr_err("crashkernel reservation failed - No suitable area found.\n");
1009 return;
1010 }
1011 } else {
1012 unsigned long long start;
1013
1014 start = memblock_find_in_range(crash_base,
1015 crash_base + crash_size,
1016 crash_size, SECTION_SIZE);
1017 if (start != crash_base) {
1018 pr_err("crashkernel reservation failed - memory is in use.\n");
1019 return;
1020 }
1021 }
1022
1023 ret = memblock_reserve(crash_base, crash_size);
1024 if (ret < 0) {
1025 pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
1026 (unsigned long)crash_base);
1027 return;
1028 }
1029
1030 pr_info("Reserving %luMB of memory at %luMB for crashkernel (System RAM: %luMB)\n",
1031 (unsigned long)(crash_size >> 20),
1032 (unsigned long)(crash_base >> 20),
1033 (unsigned long)(total_mem >> 20));
1034
1035 /* The crashk resource must always be located in normal mem */
1036 crashk_res.start = crash_base;
1037 crashk_res.end = crash_base + crash_size - 1;
1038 insert_resource(&iomem_resource, &crashk_res);
1039
1040 if (arm_has_idmap_alias()) {
1041 /*
1042 * If we have a special RAM alias for use at boot, we
1043 * need to advertise to kexec tools where the alias is.
1044 */
1045 static struct resource crashk_boot_res = {
1046 .name = "Crash kernel (boot alias)",
1047 .flags = IORESOURCE_BUSY | IORESOURCE_MEM,
1048 };
1049
1050 crashk_boot_res.start = phys_to_idmap(crash_base);
1051 crashk_boot_res.end = crashk_boot_res.start + crash_size - 1;
1052 insert_resource(&iomem_resource, &crashk_boot_res);
1053 }
1054}
1055#else
1056static inline void reserve_crashkernel(void) {}
1057#endif /* CONFIG_KEXEC */
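/*
 * Example: "crashkernel=64M" on the command line lets the code above
 * pick a 128MB-aligned base in lowmem, while "crashkernel=64M@0x90000000"
 * (address illustrative) requests that exact base and fails if the
 * range is already in use.
 */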
1058
1059void __init hyp_mode_check(void)
1060{
1061#ifdef CONFIG_ARM_VIRT_EXT
1062 sync_boot_mode();
1063
1064 if (is_hyp_mode_available()) {
1065 pr_info("CPU: All CPU(s) started in HYP mode.\n");
1066 pr_info("CPU: Virtualization extensions available.\n");
1067 } else if (is_hyp_mode_mismatched()) {
1068 pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
1069 __boot_cpu_mode & MODE_MASK);
1070 pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
1071 } else
1072 pr_info("CPU: All CPU(s) started in SVC mode.\n");
1073#endif
1074}
1075
1076void __init setup_arch(char **cmdline_p)
1077{
1078 const struct machine_desc *mdesc;
1079
1080 setup_processor();
1081 mdesc = setup_machine_fdt(__atags_pointer);
1082 if (!mdesc)
1083 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
1084 if (!mdesc) {
1085 early_print("\nError: invalid dtb and unrecognized/unsupported machine ID\n");
1086 early_print(" r1=0x%08x, r2=0x%08x\n", __machine_arch_type,
1087 __atags_pointer);
1088 if (__atags_pointer)
1089 early_print(" r2[]=%*ph\n", 16,
1090 phys_to_virt(__atags_pointer));
1091 dump_machine_table();
1092 }
1093
1094 machine_desc = mdesc;
1095 machine_name = mdesc->name;
1096 dump_stack_set_arch_desc("%s", mdesc->name);
1097
1098 if (mdesc->reboot_mode != REBOOT_HARD)
1099 reboot_mode = mdesc->reboot_mode;
1100
1101 init_mm.start_code = (unsigned long) _text;
1102 init_mm.end_code = (unsigned long) _etext;
1103 init_mm.end_data = (unsigned long) _edata;
1104 init_mm.brk = (unsigned long) _end;
1105
1106 /* populate cmd_line too for later use, preserving boot_command_line */
1107 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
1108 *cmdline_p = cmd_line;
1109
1110 early_fixmap_init();
1111 early_ioremap_init();
1112
1113 parse_early_param();
1114
1115#ifdef CONFIG_MMU
1116 early_mm_init(mdesc);
1117#endif
1118 setup_dma_zone(mdesc);
1119 xen_early_init();
1120 efi_init();
1121 /*
1122 * Make sure the calculation for lowmem/highmem is set appropriately
1123 * before reserving/allocating any memory.
1124 */
1125 adjust_lowmem_bounds();
1126 arm_memblock_init(mdesc);
1127 /* Memory may have been removed so recalculate the bounds. */
1128 adjust_lowmem_bounds();
1129
1130 early_ioremap_reset();
1131
1132 paging_init(mdesc);
1133 request_standard_resources(mdesc);
1134
1135 if (mdesc->restart)
1136 arm_pm_restart = mdesc->restart;
1137
1138 unflatten_device_tree();
1139
1140 arm_dt_init_cpu_maps();
1141 psci_dt_init();
1142#ifdef CONFIG_SMP
1143 if (is_smp()) {
1144 if (!mdesc->smp_init || !mdesc->smp_init()) {
1145 if (psci_smp_available())
1146 smp_set_ops(&psci_smp_ops);
1147 else if (mdesc->smp)
1148 smp_set_ops(mdesc->smp);
1149 }
1150 smp_init_cpus();
1151 smp_build_mpidr_hash();
1152 }
1153#endif
1154
1155 if (!is_smp())
1156 hyp_mode_check();
1157
1158 reserve_crashkernel();
1159
1160#ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
1161 handle_arch_irq = mdesc->handle_irq;
1162#endif
1163
1164#ifdef CONFIG_VT
1165#if defined(CONFIG_VGA_CONSOLE)
1166 conswitchp = &vga_con;
1167#endif
1168#endif
1169
1170 if (mdesc->init_early)
1171 mdesc->init_early();
1172}
1173
1174
1175static int __init topology_init(void)
1176{
1177 int cpu;
1178
1179 for_each_possible_cpu(cpu) {
1180 struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
1181 cpuinfo->cpu.hotpluggable = platform_can_hotplug_cpu(cpu);
1182 register_cpu(&cpuinfo->cpu, cpu);
1183 }
1184
1185 return 0;
1186}
1187subsys_initcall(topology_init);
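/*
 * topology_init() is what makes each CPU show up under sysfs, e.g. as
 * /sys/devices/system/cpu/cpu0; CPUs registered as hotpluggable
 * additionally expose an "online" control there.
 */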
1188
1189#ifdef CONFIG_HAVE_PROC_CPU
1190static int __init proc_cpu_init(void)
1191{
1192 struct proc_dir_entry *res;
1193
1194 res = proc_mkdir("cpu", NULL);
1195 if (!res)
1196 return -ENOMEM;
1197 return 0;
1198}
1199fs_initcall(proc_cpu_init);
1200#endif
1201
1202static const char *hwcap_str[] = {
1203 "swp",
1204 "half",
1205 "thumb",
1206 "26bit",
1207 "fastmult",
1208 "fpa",
1209 "vfp",
1210 "edsp",
1211 "java",
1212 "iwmmxt",
1213 "crunch",
1214 "thumbee",
1215 "neon",
1216 "vfpv3",
1217 "vfpv3d16",
1218 "tls",
1219 "vfpv4",
1220 "idiva",
1221 "idivt",
1222 "vfpd32",
1223 "lpae",
1224 "evtstrm",
1225 NULL
1226};
1227
1228static const char *hwcap2_str[] = {
1229 "aes",
1230 "pmull",
1231 "sha1",
1232 "sha2",
1233 "crc32",
1234 NULL
1235};
1236
1237static int c_show(struct seq_file *m, void *v)
1238{
1239 int i, j;
1240 u32 cpuid;
1241
1242 for_each_online_cpu(i) {
1243 /*
1244 * glibc reads /proc/cpuinfo to determine the number of
1245 * online processors, looking for lines beginning with
1246 * "processor". Give glibc what it expects.
1247 */
1248 seq_printf(m, "processor\t: %d\n", i);
1249 cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
1250 seq_printf(m, "model name\t: %s rev %d (%s)\n",
1251 cpu_name, cpuid & 15, elf_platform);
1252
1253#if defined(CONFIG_SMP)
1254 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1255 per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
1256 (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
1257#else
1258 seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
1259 loops_per_jiffy / (500000/HZ),
1260 (loops_per_jiffy / (5000/HZ)) % 100);
1261#endif
1262 /* dump out the processor features */
1263 seq_puts(m, "Features\t: ");
1264
1265 for (j = 0; hwcap_str[j]; j++)
1266 if (elf_hwcap & (1 << j))
1267 seq_printf(m, "%s ", hwcap_str[j]);
1268
1269 for (j = 0; hwcap2_str[j]; j++)
1270 if (elf_hwcap2 & (1 << j))
1271 seq_printf(m, "%s ", hwcap2_str[j]);
1272
1273 seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
1274 seq_printf(m, "CPU architecture: %s\n",
1275 proc_arch[cpu_architecture()]);
1276
1277 if ((cpuid & 0x0008f000) == 0x00000000) {
1278 /* pre-ARM7 */
1279 seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
1280 } else {
1281 if ((cpuid & 0x0008f000) == 0x00007000) {
1282 /* ARM7 */
1283 seq_printf(m, "CPU variant\t: 0x%02x\n",
1284 (cpuid >> 16) & 127);
1285 } else {
1286 /* post-ARM7 */
1287 seq_printf(m, "CPU variant\t: 0x%x\n",
1288 (cpuid >> 20) & 15);
1289 }
1290 seq_printf(m, "CPU part\t: 0x%03x\n",
1291 (cpuid >> 4) & 0xfff);
1292 }
1293 seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
1294 }
1295
1296 seq_printf(m, "Hardware\t: %s\n", machine_name);
1297 seq_printf(m, "Revision\t: %04x\n", system_rev);
1298 seq_printf(m, "Serial\t\t: %s\n", system_serial);
1299
1300 return 0;
1301}
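/*
 * Putting c_show() together, a single-core ARMv7 machine might report
 * (values illustrative):
 *
 *	processor	: 0
 *	model name	: ARMv7 Processor rev 0 (v7l)
 *	BogoMIPS	: 48.00
 *	Features	: half thumb fastmult vfp edsp neon vfpv3 tls
 *	CPU implementer	: 0x41
 *	CPU architecture: 7
 *	CPU variant	: 0x3
 *	CPU part	: 0xc09
 *	CPU revision	: 0
 *
 *	Hardware	: <machine name>
 *	Revision	: 0000
 *	Serial		: 0000000000000000
 */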
1302
1303static void *c_start(struct seq_file *m, loff_t *pos)
1304{
1305 return *pos < 1 ? (void *)1 : NULL;
1306}
1307
1308static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1309{
1310 ++*pos;
1311 return NULL;
1312}
1313
1314static void c_stop(struct seq_file *m, void *v)
1315{
1316}
1317
1318const struct seq_operations cpuinfo_op = {
1319 .start = c_start,
1320 .next = c_next,
1321 .stop = c_stop,
1322 .show = c_show
1323};