1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Procedures for creating, accessing and interpreting the device tree.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 */
11
12#undef DEBUG
13
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/init.h>
17#include <linux/threads.h>
18#include <linux/spinlock.h>
19#include <linux/types.h>
20#include <linux/pci.h>
21#include <linux/delay.h>
22#include <linux/initrd.h>
23#include <linux/bitops.h>
24#include <linux/export.h>
25#include <linux/kexec.h>
26#include <linux/irq.h>
27#include <linux/memblock.h>
28#include <linux/of.h>
29#include <linux/of_fdt.h>
30#include <linux/libfdt.h>
31#include <linux/cpu.h>
32#include <linux/pgtable.h>
33#include <linux/seq_buf.h>
34
35#include <asm/rtas.h>
36#include <asm/page.h>
37#include <asm/processor.h>
38#include <asm/irq.h>
39#include <asm/io.h>
40#include <asm/kdump.h>
41#include <asm/smp.h>
42#include <asm/mmu.h>
43#include <asm/paca.h>
44#include <asm/powernv.h>
45#include <asm/iommu.h>
46#include <asm/btext.h>
47#include <asm/sections.h>
48#include <asm/setup.h>
49#include <asm/pci-bridge.h>
50#include <asm/kexec.h>
51#include <asm/opal.h>
52#include <asm/fadump.h>
53#include <asm/epapr_hcalls.h>
54#include <asm/firmware.h>
55#include <asm/dt_cpu_ftrs.h>
56#include <asm/drmem.h>
57#include <asm/ultravisor.h>
58#include <asm/prom.h>
59#include <asm/plpks.h>
60
61#include <mm/mmu_decl.h>
62
63#ifdef DEBUG
64#define DBG(fmt...) printk(KERN_ERR fmt)
65#else
66#define DBG(fmt...)
67#endif
68
69int *chip_id_lookup_table;
70
71#ifdef CONFIG_PPC64
72int __initdata iommu_is_off;
73int __initdata iommu_force_on;
74unsigned long tce_alloc_start, tce_alloc_end;
75u64 ppc64_rma_size;
/* Number of "cpu" device-tree nodes counted by early_init_dt_scan_cpus() */
unsigned int boot_cpu_node_count __ro_after_init;
77#endif
78static phys_addr_t first_memblock_size;
79static int __initdata boot_cpu_count;
80
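/*
 * Handle the "mem=" kernel command line option, e.g. booting with "mem=2G"
 * caps usable memory at 2 GiB. The value is parsed with memparse() (so the
 * K/M/G suffixes work) and rounded up to a whole page.
 */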
81static int __init early_parse_mem(char *p)
82{
83 if (!p)
84 return 1;
85
86 memory_limit = PAGE_ALIGN(memparse(p, &p));
87 DBG("memory limit = 0x%llx\n", memory_limit);
88
89 return 0;
90}
91early_param("mem", early_parse_mem);
92
93/*
94 * overlaps_initrd - check for overlap with page aligned extension of
95 * initrd.
96 */
97static inline int overlaps_initrd(unsigned long start, unsigned long size)
98{
99#ifdef CONFIG_BLK_DEV_INITRD
100 if (!initrd_start)
101 return 0;
102
103 return (start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
104 start <= ALIGN(initrd_end, PAGE_SIZE);
105#else
106 return 0;
107#endif
108}
109
110/**
111 * move_device_tree - move tree to an unused area, if needed.
112 *
113 * The device tree may be allocated beyond our memory limit, or inside the
114 * crash kernel region for kdump, or within the page aligned range of initrd.
115 * If so, move it out of the way.
116 */
117static void __init move_device_tree(void)
118{
119 unsigned long start, size;
120 void *p;
121
122 DBG("-> move_device_tree\n");
123
124 start = __pa(initial_boot_params);
125 size = fdt_totalsize(initial_boot_params);
126
127 if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
128 !memblock_is_memory(start + size - 1) ||
129 overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
130 p = memblock_alloc_raw(size, PAGE_SIZE);
131 if (!p)
132 panic("Failed to allocate %lu bytes to move device tree\n",
133 size);
134 memcpy(p, initial_boot_params, size);
135 initial_boot_params = p;
136 DBG("Moved device tree to 0x%px\n", p);
137 }
138
139 DBG("<- move_device_tree\n");
140}
141
142/*
143 * ibm,pa/pi-features is a per-cpu property that contains a string of
144 * attribute descriptors, each of which has a 2 byte header plus up
145 * to 254 bytes worth of processor attribute bits. First header
146 * byte specifies the number of bytes following the header.
147 * Second header byte is an "attribute-specifier" type, of which
148 * zero is the only currently-defined value.
 * Implementation: the ibm_feature tables below list the byte and bit
 * offsets of the features we care about; scan_features() sets or clears
 * the corresponding CPU, MMU and user feature bits depending on whether
 * each bit is present in the property. Note that the bit numbers are
 * big-endian to match the definition in PAPR.
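 *
 * Purely illustrative example: a property value of { 0x02, 0x00, 0xc0, 0x00 }
 * encodes a single type-0 descriptor with two attribute bytes, in which
 * byte 0 has bits 0 and 1 set (big-endian numbering), i.e. the MMU and FPU
 * user features from the ibm_pa_features table below.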
154 */
155struct ibm_feature {
156 unsigned long cpu_features; /* CPU_FTR_xxx bit */
157 unsigned long mmu_features; /* MMU_FTR_xxx bit */
158 unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */
159 unsigned int cpu_user_ftrs2; /* PPC_FEATURE2_xxx bit */
160 unsigned char pabyte; /* byte number in ibm,pa/pi-features */
161 unsigned char pabit; /* bit number (big-endian) */
162 unsigned char invert; /* if 1, pa bit set => clear feature */
163};
164
165static struct ibm_feature ibm_pa_features[] __initdata = {
166 { .pabyte = 0, .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
167 { .pabyte = 0, .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
168 { .pabyte = 0, .pabit = 3, .cpu_features = CPU_FTR_CTRL },
169 { .pabyte = 0, .pabit = 6, .cpu_features = CPU_FTR_NOEXECUTE },
170 { .pabyte = 1, .pabit = 2, .mmu_features = MMU_FTR_CI_LARGE_PAGE },
171#ifdef CONFIG_PPC_RADIX_MMU
172 { .pabyte = 40, .pabit = 0, .mmu_features = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
173#endif
174 { .pabyte = 5, .pabit = 0, .cpu_features = CPU_FTR_REAL_LE,
175 .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
176 /*
177 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
178 * we don't want to turn on TM here, so we use the *_COMP versions
179 * which are 0 if the kernel doesn't support TM.
180 */
181 { .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
182 .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },
183
184 { .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
185 { .pabyte = 68, .pabit = 5, .cpu_features = CPU_FTR_DEXCR_NPHIE },
186};
187
188/*
 * The ibm,pi-features property describes processor-specific options that
 * are not covered by ibm,pa-features. Right now we only use byte 0, bit 3,
 * which indicates that a DSI interrupt is raised when a paste operation
 * targets a suspended NX window.
193 */
194static struct ibm_feature ibm_pi_features[] __initdata = {
195 { .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI },
196};
197
198static void __init scan_features(unsigned long node, const unsigned char *ftrs,
199 unsigned long tablelen,
200 struct ibm_feature *fp,
201 unsigned long ft_size)
202{
203 unsigned long i, len, bit;
204
205 /* find descriptor with type == 0 */
206 for (;;) {
207 if (tablelen < 3)
208 return;
209 len = 2 + ftrs[0];
210 if (tablelen < len)
211 return; /* descriptor 0 not found */
212 if (ftrs[1] == 0)
213 break;
214 tablelen -= len;
215 ftrs += len;
216 }
217
218 /* loop over bits we know about */
219 for (i = 0; i < ft_size; ++i, ++fp) {
220 if (fp->pabyte >= ftrs[0])
221 continue;
222 bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
223 if (bit ^ fp->invert) {
224 cur_cpu_spec->cpu_features |= fp->cpu_features;
225 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
226 cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
227 cur_cpu_spec->mmu_features |= fp->mmu_features;
228 } else {
229 cur_cpu_spec->cpu_features &= ~fp->cpu_features;
230 cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
231 cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
232 cur_cpu_spec->mmu_features &= ~fp->mmu_features;
233 }
234 }
235}
236
237static void __init check_cpu_features(unsigned long node, char *name,
238 struct ibm_feature *fp,
239 unsigned long size)
240{
241 const unsigned char *pa_ftrs;
242 int tablelen;
243
244 pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
245 if (pa_ftrs == NULL)
246 return;
247
248 scan_features(node, pa_ftrs, tablelen, fp, size);
249}
250
251#ifdef CONFIG_PPC_64S_HASH_MMU
252static void __init init_mmu_slb_size(unsigned long node)
253{
254 const __be32 *slb_size_ptr;
255
256 slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
257 of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
258
259 if (slb_size_ptr)
260 mmu_slb_size = be32_to_cpup(slb_size_ptr);
261}
262#else
263#define init_mmu_slb_size(node) do { } while(0)
264#endif
265
266static struct feature_property {
267 const char *name;
268 u32 min_value;
269 unsigned long cpu_feature;
270 unsigned long cpu_user_ftr;
271} feature_properties[] __initdata = {
272#ifdef CONFIG_ALTIVEC
273 {"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
274 {"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
275#endif /* CONFIG_ALTIVEC */
276#ifdef CONFIG_VSX
277 /* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
278 {"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
279#endif /* CONFIG_VSX */
280#ifdef CONFIG_PPC64
281 {"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
282 {"ibm,purr", 1, CPU_FTR_PURR, 0},
283 {"ibm,spurr", 1, CPU_FTR_SPURR, 0},
284#endif /* CONFIG_PPC64 */
285};
286
287#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
288static __init void identical_pvr_fixup(unsigned long node)
289{
290 unsigned int pvr;
291 const char *model = of_get_flat_dt_prop(node, "model", NULL);
292
293 /*
294 * Since 440GR(x)/440EP(x) processors have the same pvr,
295 * we check the node path and set bit 28 in the cur_cpu_spec
296 * pvr for EP(x) processor version. This bit is always 0 in
297 * the "real" pvr. Then we call identify_cpu again with
298 * the new logical pvr to enable FPU support.
299 */
300 if (model && strstr(model, "440EP")) {
301 pvr = cur_cpu_spec->pvr_value | 0x8;
302 identify_cpu(0, pvr);
303 DBG("Using logical pvr %x for %s\n", pvr, model);
304 }
305}
306#else
307#define identical_pvr_fixup(node) do { } while(0)
308#endif
309
310static void __init check_cpu_feature_properties(unsigned long node)
311{
312 int i;
313 struct feature_property *fp = feature_properties;
314 const __be32 *prop;
315
316 for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
317 prop = of_get_flat_dt_prop(node, fp->name, NULL);
318 if (prop && be32_to_cpup(prop) >= fp->min_value) {
319 cur_cpu_spec->cpu_features |= fp->cpu_feature;
320 cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
321 }
322 }
323}
324
325static int __init early_init_dt_scan_cpus(unsigned long node,
326 const char *uname, int depth,
327 void *data)
328{
329 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
330 const __be32 *prop;
331 const __be32 *intserv;
332 int i, nthreads;
333 int len;
334 int found = -1;
335 int found_thread = 0;
336
337 /* We are scanning "cpu" nodes only */
338 if (type == NULL || strcmp(type, "cpu") != 0)
339 return 0;
340
341 if (IS_ENABLED(CONFIG_PPC64))
342 boot_cpu_node_count++;
343
344 /* Get physical cpuid */
345 intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
346 if (!intserv)
347 intserv = of_get_flat_dt_prop(node, "reg", &len);
348
349 nthreads = len / sizeof(int);
350
351 /*
352 * Now see if any of these threads match our boot cpu.
353 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
354 */
355 for (i = 0; i < nthreads; i++) {
356 if (be32_to_cpu(intserv[i]) ==
357 fdt_boot_cpuid_phys(initial_boot_params)) {
358 found = boot_cpu_count;
359 found_thread = i;
360 }
361#ifdef CONFIG_SMP
362 /* logical cpu id is always 0 on UP kernels */
363 boot_cpu_count++;
364#endif
365 }
366
367 /* Not the boot CPU */
368 if (found < 0)
369 return 0;
370
371 DBG("boot cpu: logical %d physical %d\n", found,
372 be32_to_cpu(intserv[found_thread]));
373 boot_cpuid = found;
374
375 if (IS_ENABLED(CONFIG_PPC64))
376 boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
377
378 /*
379 * PAPR defines "logical" PVR values for cpus that
380 * meet various levels of the architecture:
381 * 0x0f000001 Architecture version 2.04
382 * 0x0f000002 Architecture version 2.05
383 * If the cpu-version property in the cpu node contains
384 * such a value, we call identify_cpu again with the
385 * logical PVR value in order to use the cpu feature
386 * bits appropriate for the architecture level.
387 *
388 * A POWER6 partition in "POWER6 architected" mode
389 * uses the 0x0f000002 PVR value; in POWER5+ mode
390 * it uses 0x0f000001.
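	 *
	 * Later architecture levels follow the same scheme; for reference,
	 * cputable.c also recognises 0x0f000003 (v2.06), 0x0f000004 (v2.07),
	 * 0x0f000005 (v3.0) and 0x0f000006 (v3.1).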
391 *
392 * If we're using device tree CPU feature discovery then we don't
393 * support the cpu-version property, and it's the responsibility of the
394 * firmware/hypervisor to provide the correct feature set for the
395 * architecture level via the ibm,powerpc-cpu-features binding.
396 */
397 if (!dt_cpu_ftrs_in_use()) {
398 prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
399 if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
400 identify_cpu(0, be32_to_cpup(prop));
401 seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
402 }
403
404 check_cpu_feature_properties(node);
405 check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
406 ARRAY_SIZE(ibm_pa_features));
407 check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
408 ARRAY_SIZE(ibm_pi_features));
409 }
410
411 identical_pvr_fixup(node);
412 init_mmu_slb_size(node);
413
414#ifdef CONFIG_PPC64
415 if (nthreads == 1)
416 cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
417 else if (!dt_cpu_ftrs_in_use())
418 cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
419#endif
420
421 return 0;
422}
423
424static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
425 const char *uname,
426 int depth, void *data)
427{
428 const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
429
430 /* Use common scan routine to determine if this is the chosen node */
431 if (early_init_dt_scan_chosen(data) < 0)
432 return 0;
433
434#ifdef CONFIG_PPC64
435 /* check if iommu is forced on or off */
436 if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
437 iommu_is_off = 1;
438 if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
439 iommu_force_on = 1;
440#endif
441
442 /* mem=x on the command line is the preferred mechanism */
443 lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
444 if (lprop)
445 memory_limit = *lprop;
446
447#ifdef CONFIG_PPC64
448 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
449 if (lprop)
450 tce_alloc_start = *lprop;
451 lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
452 if (lprop)
453 tce_alloc_end = *lprop;
454#endif
455
456#ifdef CONFIG_KEXEC_CORE
457 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
458 if (lprop)
459 crashk_res.start = *lprop;
460
461 lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
462 if (lprop)
463 crashk_res.end = crashk_res.start + *lprop - 1;
464#endif
465
466 /* break now */
467 return 1;
468}
469
470/*
 * Compare the range against the maximum memory limit and trim the
 * size if it crosses the limit.
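 *
 * For example, with MAX_PHYSMEM_BITS == 46 anything at or above 64 TiB
 * would be dropped, and a block that starts below that limit but extends
 * past it is trimmed to end exactly at the limit.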
473 */
474
475#ifdef CONFIG_SPARSEMEM
476static bool __init validate_mem_limit(u64 base, u64 *size)
477{
478 u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
479
480 if (base >= max_mem)
481 return false;
482 if ((base + *size) > max_mem)
483 *size = max_mem - base;
484 return true;
485}
486#else
487static bool __init validate_mem_limit(u64 base, u64 *size)
488{
489 return true;
490}
491#endif
492
493#ifdef CONFIG_PPC_PSERIES
494/*
495 * Interpret the ibm dynamic reconfiguration memory LMBs.
496 * This contains a list of memory blocks along with NUMA affinity
497 * information.
498 */
499static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
500 const __be32 **usm,
501 void *data)
502{
503 u64 base, size;
504 int is_kexec_kdump = 0, rngs;
505
506 base = lmb->base_addr;
507 size = drmem_lmb_size();
508 rngs = 1;
509
510 /*
511 * Skip this block if the reserved bit is set in flags
512 * or if the block is not assigned to this partition.
513 */
514 if ((lmb->flags & DRCONF_MEM_RESERVED) ||
515 !(lmb->flags & DRCONF_MEM_ASSIGNED))
516 return 0;
517
518 if (*usm)
519 is_kexec_kdump = 1;
520
521 if (is_kexec_kdump) {
522 /*
		 * For each memblock in ibm,dynamic-memory, the
		 * corresponding entry in the linux,drconf-usable-memory
		 * property contains a counter 'p' followed by 'p'
		 * (base, size) pairs. Read the counter from the
		 * linux,drconf-usable-memory property first.
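		 *
		 * Illustrative example: an entry of <2 baseA sizeA baseB sizeB>
		 * would describe two usable (base, size) ranges within this LMB.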
528 */
529 rngs = dt_mem_next_cell(dt_root_size_cells, usm);
		if (!rngs) /* there are no (base, size) pairs */
531 return 0;
532 }
533
534 do {
535 if (is_kexec_kdump) {
536 base = dt_mem_next_cell(dt_root_addr_cells, usm);
537 size = dt_mem_next_cell(dt_root_size_cells, usm);
538 }
539
540 if (iommu_is_off) {
541 if (base >= 0x80000000ul)
542 continue;
543 if ((base + size) > 0x80000000ul)
544 size = 0x80000000ul - base;
545 }
546
547 if (!validate_mem_limit(base, &size))
548 continue;
549
550 DBG("Adding: %llx -> %llx\n", base, size);
551 memblock_add(base, size);
552
553 if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
554 memblock_mark_hotplug(base, size);
555 } while (--rngs);
556
557 return 0;
558}
559#endif /* CONFIG_PPC_PSERIES */
560
561static int __init early_init_dt_scan_memory_ppc(void)
562{
563#ifdef CONFIG_PPC_PSERIES
564 const void *fdt = initial_boot_params;
565 int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
566
567 if (node > 0)
568 walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
569
570#endif
571
572 return early_init_dt_scan_memory();
573}
574
575/*
 * For a relocatable kernel, we need to get the memstart_addr first,
 * then use it to calculate the virtual kernel start address. This has
 * to happen at a very early stage (before machine_init). At that point
 * we only want to determine memstart_addr and must not touch the
 * memblock lists yet, so introduce a variable that lets us skip the
 * memblock_add() calls.
582 */
583#ifdef CONFIG_RELOCATABLE
584static int add_mem_to_memblock = 1;
585#else
586#define add_mem_to_memblock 1
587#endif
588
589void __init early_init_dt_add_memory_arch(u64 base, u64 size)
590{
591#ifdef CONFIG_PPC64
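	/*
	 * When the IOMMU has been forced off via the device tree, ignore
	 * memory above the 2 GiB boundary and clamp any block that crosses
	 * it (early_init_drmem_lmb() applies the same clamping).
	 */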
592 if (iommu_is_off) {
593 if (base >= 0x80000000ul)
594 return;
595 if ((base + size) > 0x80000000ul)
596 size = 0x80000000ul - base;
597 }
598#endif
	/* Keep track of the beginning of memory -and- the size of
	 * the very first block in the device-tree, as it represents
	 * the RMA on ppc64 server systems.
602 */
603 if (base < memstart_addr) {
604 memstart_addr = base;
605 first_memblock_size = size;
606 }
607
608 /* Add the chunk to the MEMBLOCK list */
609 if (add_mem_to_memblock) {
610 if (validate_mem_limit(base, &size))
611 memblock_add(base, size);
612 }
613}
614
615static void __init early_reserve_mem_dt(void)
616{
617 unsigned long i, dt_root;
618 int len;
619 const __be32 *prop;
620
621 early_init_fdt_reserve_self();
622 early_init_fdt_scan_reserved_mem();
623
624 dt_root = of_get_flat_dt_root();
625
626 prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
627
628 if (!prop)
629 return;
630
631 DBG("Found new-style reserved-ranges\n");
632
633 /* Each reserved range is an (address,size) pair, 2 cells each,
634 * totalling 4 cells per range. */
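	/*
	 * Illustrative example: reserved-ranges = <0x0 0x01000000 0x0 0x00400000>
	 * reserves 4 MiB starting at 16 MiB.
	 */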
635 for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
636 u64 base, size;
637
638 base = of_read_number(prop + (i * 4) + 0, 2);
639 size = of_read_number(prop + (i * 4) + 2, 2);
640
641 if (size) {
642 DBG("reserving: %llx -> %llx\n", base, size);
643 memblock_reserve(base, size);
644 }
645 }
646}
647
648static void __init early_reserve_mem(void)
649{
650 __be64 *reserve_map;
651
652 reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
653 fdt_off_mem_rsvmap(initial_boot_params));
654
	/* Look for the new "reserved-ranges" property in the DT */
656 early_reserve_mem_dt();
657
658#ifdef CONFIG_BLK_DEV_INITRD
659 /* Then reserve the initrd, if any */
660 if (initrd_start && (initrd_end > initrd_start)) {
661 memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
662 ALIGN(initrd_end, PAGE_SIZE) -
663 ALIGN_DOWN(initrd_start, PAGE_SIZE));
664 }
665#endif /* CONFIG_BLK_DEV_INITRD */
666
667 if (!IS_ENABLED(CONFIG_PPC32))
668 return;
669
670 /*
671 * Handle the case where we might be booting from an old kexec
	 * image that set up the mem_rsvmap as pairs of 32-bit values.
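	 *
	 * (Detection heuristic: read as a 64-bit big-endian value, the first
	 * entry of a 32-bit map with a non-zero base looks like
	 * (base << 32 | size), which exceeds 4 GiB, whereas a genuine 64-bit
	 * base on a 32-bit machine never does.)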
673 */
674 if (be64_to_cpup(reserve_map) > 0xffffffffull) {
675 u32 base_32, size_32;
676 __be32 *reserve_map_32 = (__be32 *)reserve_map;
677
678 DBG("Found old 32-bit reserve map\n");
679
680 while (1) {
681 base_32 = be32_to_cpup(reserve_map_32++);
682 size_32 = be32_to_cpup(reserve_map_32++);
683 if (size_32 == 0)
684 break;
685 DBG("reserving: %x -> %x\n", base_32, size_32);
686 memblock_reserve(base_32, size_32);
687 }
688 return;
689 }
690}
691
692#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
693static bool tm_disabled __initdata;
694
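/*
 * "ppc_tm=off" on the kernel command line disables hardware transactional
 * memory (any value kstrtobool() understands works, e.g. "ppc_tm=0").
 */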
695static int __init parse_ppc_tm(char *str)
696{
697 bool res;
698
699 if (kstrtobool(str, &res))
700 return -EINVAL;
701
702 tm_disabled = !res;
703
704 return 0;
705}
706early_param("ppc_tm", parse_ppc_tm);
707
708static void __init tm_init(void)
709{
710 if (tm_disabled) {
711 pr_info("Disabling hardware transactional memory (HTM)\n");
712 cur_cpu_spec->cpu_user_features2 &=
713 ~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
714 cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
715 return;
716 }
717
718 pnv_tm_init();
719}
720#else
721static void tm_init(void) { }
722#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
723
724static int __init
725early_init_dt_scan_model(unsigned long node, const char *uname,
726 int depth, void *data)
727{
728 const char *prop;
729
730 if (depth != 0)
731 return 0;
732
733 prop = of_get_flat_dt_prop(node, "model", NULL);
734 if (prop)
735 seq_buf_printf(&ppc_hw_desc, "%s ", prop);
736
737 /* break now */
738 return 1;
739}
740
741#ifdef CONFIG_PPC64
742static void __init save_fscr_to_task(void)
743{
744 /*
745 * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
746 * have configured via the device tree features or via __init_FSCR().
747 * That value will then be propagated to pid 1 (init) and all future
748 * processes.
749 */
750 if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
751 init_task.thread.fscr = mfspr(SPRN_FSCR);
752}
753#else
754static inline void save_fscr_to_task(void) {}
755#endif
756
757
758void __init early_init_devtree(void *params)
759{
760 phys_addr_t limit;
761
762 DBG(" -> early_init_devtree(%px)\n", params);
763
764 /* Too early to BUG_ON(), do it by hand */
765 if (!early_init_dt_verify(params))
766 panic("BUG: Failed verifying flat device tree, bad version?");
767
768 of_scan_flat_dt(early_init_dt_scan_model, NULL);
769
770#ifdef CONFIG_PPC_RTAS
771 /* Some machines might need RTAS info for debugging, grab it now. */
772 of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
773#endif
774
775#ifdef CONFIG_PPC_POWERNV
776 /* Some machines might need OPAL info for debugging, grab it now. */
777 of_scan_flat_dt(early_init_dt_scan_opal, NULL);
778
779 /* Scan tree for ultravisor feature */
780 of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
781#endif
782
783#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
784 /* scan tree to see if dump is active during last boot */
785 of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
786#endif
787
	/* Retrieve various information from the /chosen node of the
789 * device-tree, including the platform type, initrd location and
790 * size, TCE reserve, and more ...
791 */
792 of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
793
794 /* Scan memory nodes and rebuild MEMBLOCKs */
795 early_init_dt_scan_root();
796 early_init_dt_scan_memory_ppc();
797
798 /*
799 * As generic code authors expect to be able to use static keys
800 * in early_param() handlers, we initialize the static keys just
801 * before parsing early params (it's fine to call jump_label_init()
802 * more than once).
803 */
804 jump_label_init();
805 parse_early_param();
806
807 /* make sure we've parsed cmdline for mem= before this */
808 if (memory_limit)
809 first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
810 setup_initial_memory_limit(memstart_addr, first_memblock_size);
811 /* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
812 memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
813 /* If relocatable, reserve first 32k for interrupt vectors etc. */
814 if (PHYSICAL_START > MEMORY_START)
815 memblock_reserve(MEMORY_START, 0x8000);
816 reserve_kdump_trampoline();
817#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
818 /*
819 * If we fail to reserve memory for firmware-assisted dump then
	 * fall back to kexec-based kdump.
821 */
822 if (fadump_reserve_mem() == 0)
823#endif
824 reserve_crashkernel();
825 early_reserve_mem();
826
827 /* Ensure that total memory size is page-aligned. */
828 limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
829 memblock_enforce_memory_limit(limit);
830
831#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
832 if (!early_radix_enabled())
833 memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
834#endif
835
836 memblock_allow_resize();
837 memblock_dump_all();
838
839 DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());
840
841 /* We may need to relocate the flat tree, do it now.
842 * FIXME .. and the initrd too? */
843 move_device_tree();
844
845 DBG("Scanning CPUs ...\n");
846
847 dt_cpu_ftrs_scan();
848
849 // We can now add the CPU name & PVR to the hardware description
850 seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
851
	/* Retrieve CPU-related information from the flat tree
853 * (altivec support, boot CPU ID, ...)
854 */
855 of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
856 if (boot_cpuid < 0) {
857 printk("Failed to identify boot CPU !\n");
858 BUG();
859 }
860
861 save_fscr_to_task();
862
863#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
864 /* We'll later wait for secondaries to check in; there are
865 * NCPUS-1 non-boot CPUs :-)
866 */
867 spinning_secondaries = boot_cpu_count - 1;
868#endif
869
870 mmu_early_init_devtree();
871
872#ifdef CONFIG_PPC_POWERNV
873 /* Scan and build the list of machine check recoverable ranges */
874 of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
875#endif
876 epapr_paravirt_early_init();
877
878 /* Now try to figure out if we are running on LPAR and so on */
879 pseries_probe_fw_features();
880
881 /*
882 * Initialize pkey features and default AMR/IAMR values
883 */
884 pkey_early_init_devtree();
885
886#ifdef CONFIG_PPC_PS3
887 /* Identify PS3 firmware */
888 if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
889 powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
890#endif
891
892 /* If kexec left a PLPKS password in the DT, get it and clear it */
893 plpks_early_init_devtree();
894
895 tm_init();
896
897 DBG(" <- early_init_devtree()\n");
898}
899
900#ifdef CONFIG_RELOCATABLE
901/*
902 * This function run before early_init_devtree, so we have to init
903 * initial_boot_params.
904 */
905void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
906{
907 /* Setup flat device-tree pointer */
908 initial_boot_params = params;
909
910 /*
	 * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
	 * messing with the memblock lists.
913 */
914 add_mem_to_memblock = 0;
915 early_init_dt_scan_root();
916 early_init_dt_scan_memory_ppc();
917 add_mem_to_memblock = 1;
918
919 if (size)
920 *size = first_memblock_size;
921}
922#endif
923
924/*******
925 *
926 * New implementation of the OF "find" APIs, return a refcounted
927 * object, call of_node_put() when done. The device tree and list
928 * are protected by a rw_lock.
929 *
930 * Note that property management will need some locking as well,
931 * this isn't dealt with yet.
932 *
933 *******/
934
935/**
936 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
937 * @np: device node of the device
938 *
939 * This looks for a property "ibm,chip-id" in the node or any
940 * of its parents and returns its content, or -1 if it cannot
941 * be found.
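 *
 * For example, of_get_ibm_chip_id(of_get_cpu_node(cpu, NULL)) yields the
 * chip a given CPU sits on, which is essentially what cpu_to_chip_id()
 * below does.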
942 */
943int of_get_ibm_chip_id(struct device_node *np)
944{
945 of_node_get(np);
946 while (np) {
947 u32 chip_id;
948
949 /*
950 * Skiboot may produce memory nodes that contain more than one
951 * cell in chip-id, we only read the first one here.
952 */
953 if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
954 of_node_put(np);
955 return chip_id;
956 }
957
958 np = of_get_next_parent(np);
959 }
960 return -1;
961}
962EXPORT_SYMBOL(of_get_ibm_chip_id);
963
964/**
 * cpu_to_chip_id - Return the cpu's chip-id
966 * @cpu: The logical cpu number.
967 *
968 * Return the value of the ibm,chip-id property corresponding to the given
969 * logical cpu number. If the chip-id can not be found, returns -1.
970 */
971int cpu_to_chip_id(int cpu)
972{
973 struct device_node *np;
974 int ret = -1, idx;
975
976 idx = cpu / threads_per_core;
977 if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
978 return chip_id_lookup_table[idx];
979
980 np = of_get_cpu_node(cpu, NULL);
981 if (np) {
982 ret = of_get_ibm_chip_id(np);
983 of_node_put(np);
984
985 if (chip_id_lookup_table)
986 chip_id_lookup_table[idx] = ret;
987 }
988
989 return ret;
990}
991EXPORT_SYMBOL(cpu_to_chip_id);
992
993bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
994{
995#ifdef CONFIG_SMP
996 /*
997 * Early firmware scanning must use this rather than
998 * get_hard_smp_processor_id because we don't have pacas allocated
999 * until memory topology is discovered.
1000 */
1001 if (cpu_to_phys_id != NULL)
1002 return (int)phys_id == cpu_to_phys_id[cpu];
1003#endif
1004
1005 return (int)phys_id == get_hard_smp_processor_id(cpu);
1006}