// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for creating, accessing and interpreting the device tree.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *  {engebret|bergner}@us.ibm.com
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/export.h>
#include <linux/kexec.h>
#include <linux/irq.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/seq_buf.h>

#include <asm/rtas.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/kdump.h>
#include <asm/smp.h>
#include <asm/mmu.h>
#include <asm/paca.h>
#include <asm/powernv.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/pci-bridge.h>
#include <asm/kexec.h>
#include <asm/opal.h>
#include <asm/fadump.h>
#include <asm/epapr_hcalls.h>
#include <asm/firmware.h>
#include <asm/dt_cpu_ftrs.h>
#include <asm/drmem.h>
#include <asm/ultravisor.h>
#include <asm/prom.h>

#include <mm/mmu_decl.h>

#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
#else
#define DBG(fmt...)
#endif

int *chip_id_lookup_table;

#ifdef CONFIG_PPC64
int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;
u64 ppc64_rma_size;
unsigned int boot_cpu_node_count __ro_after_init;
#endif
static phys_addr_t first_memblock_size;
static int __initdata boot_cpu_count;

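/*
 * Parse the "mem=" command line limit; e.g. "mem=512M" caps usable
 * memory at 512 MiB (memparse handles the K/M/G size suffixes).
 */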
static int __init early_parse_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = PAGE_ALIGN(memparse(p, &p));
	DBG("memory limit = 0x%llx\n", memory_limit);

	return 0;
}
early_param("mem", early_parse_mem);

/*
 * overlaps_initrd - check for overlap with page aligned extension of
 * initrd.
 */
static inline int overlaps_initrd(unsigned long start, unsigned long size)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (!initrd_start)
		return 0;

	return	(start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
			start <= ALIGN(initrd_end, PAGE_SIZE);
#else
	return 0;
#endif
}

/**
 * move_device_tree - move tree to an unused area, if needed.
 *
 * The device tree may be allocated beyond our memory limit, or inside the
 * crash kernel region for kdump, or within the page aligned range of initrd.
 * If so, move it out of the way.
 */
static void __init move_device_tree(void)
{
	unsigned long start, size;
	void *p;

	DBG("-> move_device_tree\n");

	start = __pa(initial_boot_params);
	size = fdt_totalsize(initial_boot_params);

	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
	    !memblock_is_memory(start + size - 1) ||
	    overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
		p = memblock_alloc_raw(size, PAGE_SIZE);
		if (!p)
			panic("Failed to allocate %lu bytes to move device tree\n",
			      size);
		memcpy(p, initial_boot_params, size);
		initial_boot_params = p;
		DBG("Moved device tree to 0x%px\n", p);
	}

	DBG("<- move_device_tree\n");
}

/*
 * ibm,pa/pi-features is a per-cpu property that contains a string of
 * attribute descriptors, each of which has a 2 byte header plus up
 * to 254 bytes worth of processor attribute bits.  First header
 * byte specifies the number of bytes following the header.
 * Second header byte is an "attribute-specifier" type, of which
 * zero is the only currently-defined value.
 * Implementation:  Pass in the byte and bit offset for the feature
 * that we are interested in.  The function will return -1 if the
 * pa-features property is missing, or a 1/0 to indicate if the feature
 * is supported/not supported.  Note that the bit numbers are
 * big-endian to match the definition in PAPR.
 */
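/*
 * For illustration (hypothetical bytes): a property starting with
 * { 0x20, 0x00, 0xc0, ... } is a type-0 descriptor carrying 0x20
 * attribute bytes; bit 0 of attribute byte 0 (big-endian, i.e. the
 * 0x80 mask in 0xc0) maps to PPC_FEATURE_HAS_MMU in the table below.
 */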
struct ibm_feature {
	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
	unsigned char	pabyte;		/* byte number in ibm,pa/pi-features */
	unsigned char	pabit;		/* bit number (big-endian) */
	unsigned char	invert;		/* if 1, pa bit set => clear feature */
};

static struct ibm_feature ibm_pa_features[] __initdata = {
	{ .pabyte = 0,  .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
	{ .pabyte = 0,  .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
#ifdef CONFIG_PPC_RADIX_MMU
	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
#endif
	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
	  .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
	/*
	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
	 * we don't want to turn on TM here, so we use the *_COMP versions
	 * which are 0 if the kernel doesn't support TM.
	 */
	{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
	  .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },

	{ .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
};

/*
 * The ibm,pi-features property describes processor-specific options
 * that are not covered by ibm,pa-features. Right now we use byte 0,
 * bit 3, which indicates that a DSI interrupt occurs when a paste
 * operation targets a suspended NX window.
 */
static struct ibm_feature ibm_pi_features[] __initdata = {
	{ .pabyte = 0, .pabit = 3, .mmu_features = MMU_FTR_NX_DSI },
};

static void __init scan_features(unsigned long node, const unsigned char *ftrs,
				 unsigned long tablelen,
				 struct ibm_feature *fp,
				 unsigned long ft_size)
{
	unsigned long i, len, bit;

	/* find descriptor with type == 0 */
	for (;;) {
		if (tablelen < 3)
			return;
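		/* each descriptor is a 2-byte header plus ftrs[0] attribute bytes */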
		len = 2 + ftrs[0];
		if (tablelen < len)
			return;		/* descriptor 0 not found */
		if (ftrs[1] == 0)
			break;
		tablelen -= len;
		ftrs += len;
	}

	/* loop over bits we know about */
	for (i = 0; i < ft_size; ++i, ++fp) {
		if (fp->pabyte >= ftrs[0])
			continue;
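		/* PAPR numbers bits big-endian: pabit 0 is the MSB, i.e. mask 0x80 */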
		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
		if (bit ^ fp->invert) {
			cur_cpu_spec->cpu_features |= fp->cpu_features;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features |= fp->mmu_features;
		} else {
			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
		}
	}
}

static void __init check_cpu_features(unsigned long node, char *name,
				      struct ibm_feature *fp,
				      unsigned long size)
{
	const unsigned char *pa_ftrs;
	int tablelen;

	pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
	if (pa_ftrs == NULL)
		return;

	scan_features(node, pa_ftrs, tablelen, fp, size);
}

#ifdef CONFIG_PPC_64S_HASH_MMU
static void __init init_mmu_slb_size(unsigned long node)
{
	const __be32 *slb_size_ptr;
	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);

	if (slb_size_ptr)
		mmu_slb_size = be32_to_cpup(slb_size_ptr);
}
#else
#define init_mmu_slb_size(node) do { } while(0)
#endif

static struct feature_property {
	const char *name;
	u32 min_value;
	unsigned long cpu_feature;
	unsigned long cpu_user_ftr;
} feature_properties[] __initdata = {
#ifdef CONFIG_ALTIVEC
	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
#endif /* CONFIG_VSX */
#ifdef CONFIG_PPC64
	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
	{"ibm,purr", 1, CPU_FTR_PURR, 0},
	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
#endif /* CONFIG_PPC64 */
};

#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
static __init void identical_pvr_fixup(unsigned long node)
{
	unsigned int pvr;
	const char *model = of_get_flat_dt_prop(node, "model", NULL);

	/*
	 * Since 440GR(x)/440EP(x) processors have the same pvr,
	 * we check the node path and set bit 28 in the cur_cpu_spec
	 * pvr for EP(x) processor version. This bit is always 0 in
	 * the "real" pvr. Then we call identify_cpu again with
	 * the new logical pvr to enable FPU support.
	 */
	if (model && strstr(model, "440EP")) {
		pvr = cur_cpu_spec->pvr_value | 0x8;
		identify_cpu(0, pvr);
		DBG("Using logical pvr %x for %s\n", pvr, model);
	}
}
#else
#define identical_pvr_fixup(node) do { } while(0)
#endif

static void __init check_cpu_feature_properties(unsigned long node)
{
	int i;
	struct feature_property *fp = feature_properties;
	const __be32 *prop;

	for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
		prop = of_get_flat_dt_prop(node, fp->name, NULL);
		if (prop && be32_to_cpup(prop) >= fp->min_value) {
			cur_cpu_spec->cpu_features |= fp->cpu_feature;
			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
		}
	}
}

static int __init early_init_dt_scan_cpus(unsigned long node,
					  const char *uname, int depth,
					  void *data)
{
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
	const __be32 *prop;
	const __be32 *intserv;
	int i, nthreads;
	int len;
	int found = -1;
	int found_thread = 0;

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	if (IS_ENABLED(CONFIG_PPC64))
		boot_cpu_node_count++;

	/* Get physical cpuid */
	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		intserv = of_get_flat_dt_prop(node, "reg", &len);

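	/* each 4-byte cell of the property names one hardware thread */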
	nthreads = len / sizeof(int);

	/*
	 * Now see if any of these threads match our boot cpu.
	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
	 */
	for (i = 0; i < nthreads; i++) {
		if (be32_to_cpu(intserv[i]) ==
			fdt_boot_cpuid_phys(initial_boot_params)) {
			found = boot_cpu_count;
			found_thread = i;
		}
#ifdef CONFIG_SMP
		/* logical cpu id is always 0 on UP kernels */
		boot_cpu_count++;
#endif
	}

	/* Not the boot CPU */
	if (found < 0)
		return 0;

	DBG("boot cpu: logical %d physical %d\n", found,
	    be32_to_cpu(intserv[found_thread]));
	boot_cpuid = found;

	// Pass the boot CPU's hard CPU id back to our caller
	*((u32 *)data) = be32_to_cpu(intserv[found_thread]);

	/*
	 * PAPR defines "logical" PVR values for cpus that
	 * meet various levels of the architecture:
	 * 0x0f000001	Architecture version 2.04
	 * 0x0f000002	Architecture version 2.05
	 * If the cpu-version property in the cpu node contains
	 * such a value, we call identify_cpu again with the
	 * logical PVR value in order to use the cpu feature
	 * bits appropriate for the architecture level.
	 *
	 * A POWER6 partition in "POWER6 architected" mode
	 * uses the 0x0f000002 PVR value; in POWER5+ mode
	 * it uses 0x0f000001.
	 *
	 * If we're using device tree CPU feature discovery then we don't
	 * support the cpu-version property, and it's the responsibility of the
	 * firmware/hypervisor to provide the correct feature set for the
	 * architecture level via the ibm,powerpc-cpu-features binding.
	 */
	if (!dt_cpu_ftrs_in_use()) {
		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
		if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
			identify_cpu(0, be32_to_cpup(prop));
			seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
		}

		check_cpu_feature_properties(node);
		check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
				   ARRAY_SIZE(ibm_pa_features));
		check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
				   ARRAY_SIZE(ibm_pi_features));
	}

	identical_pvr_fixup(node);
	init_mmu_slb_size(node);

#ifdef CONFIG_PPC64
	if (nthreads == 1)
		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
	else if (!dt_cpu_ftrs_in_use())
		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
#endif

	return 0;
}

static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
						const char *uname,
						int depth, void *data)
{
	const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */

	/* Use common scan routine to determine if this is the chosen node */
	if (early_init_dt_scan_chosen(data) < 0)
		return 0;

#ifdef CONFIG_PPC64
	/* check if iommu is forced on or off */
	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
		iommu_is_off = 1;
	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
		iommu_force_on = 1;
#endif

	/* mem=x on the command line is the preferred mechanism */
	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
	if (lprop)
		memory_limit = *lprop;

#ifdef CONFIG_PPC64
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
	if (lprop)
		tce_alloc_start = *lprop;
	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
	if (lprop)
		tce_alloc_end = *lprop;
#endif

#ifdef CONFIG_KEXEC_CORE
	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	if (lprop)
		crashk_res.start = *lprop;

	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (lprop)
		crashk_res.end = crashk_res.start + *lprop - 1;
#endif

	/* break now */
	return 1;
}

/*
 * Compare the range against the maximum addressable memory limit and
 * trim the size if it crosses the limit.
 */
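/*
 * e.g. (hypothetical config): with MAX_PHYSMEM_BITS == 46, max_mem is
 * 2^46 = 64 TiB; a range crossing that boundary is trimmed, and one
 * starting at or above it is dropped entirely.
 */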

#ifdef CONFIG_SPARSEMEM
static bool __init validate_mem_limit(u64 base, u64 *size)
{
	u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);

	if (base >= max_mem)
		return false;
	if ((base + *size) > max_mem)
		*size = max_mem - base;
	return true;
}
#else
static bool __init validate_mem_limit(u64 base, u64 *size)
{
	return true;
}
#endif

#ifdef CONFIG_PPC_PSERIES
/*
 * Interpret the ibm dynamic reconfiguration memory LMBs.
 * This contains a list of memory blocks along with NUMA affinity
 * information.
 */
static int __init early_init_drmem_lmb(struct drmem_lmb *lmb,
					const __be32 **usm,
					void *data)
{
	u64 base, size;
	int is_kexec_kdump = 0, rngs;

	base = lmb->base_addr;
	size = drmem_lmb_size();
	rngs = 1;

	/*
	 * Skip this block if the reserved bit is set in flags
	 * or if the block is not assigned to this partition.
	 */
	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
		return 0;

	if (*usm)
		is_kexec_kdump = 1;

	if (is_kexec_kdump) {
		/*
		 * For each memblock in ibm,dynamic-memory, the
		 * corresponding entry in the linux,drconf-usable-memory
		 * property contains a counter 'p' followed by 'p'
		 * (base, size) pairs. Read the counter from the
		 * linux,drconf-usable-memory property first.
		 */
		rngs = dt_mem_next_cell(dt_root_size_cells, usm);
		if (!rngs) /* there are no (base, size) pairs */
			return 0;
	}

	do {
		if (is_kexec_kdump) {
			base = dt_mem_next_cell(dt_root_addr_cells, usm);
			size = dt_mem_next_cell(dt_root_size_cells, usm);
		}

		if (iommu_is_off) {
			if (base >= 0x80000000ul)
				continue;
			if ((base + size) > 0x80000000ul)
				size = 0x80000000ul - base;
		}

		if (!validate_mem_limit(base, &size))
			continue;

		DBG("Adding: %llx -> %llx\n", base, size);
		memblock_add(base, size);

		if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
			memblock_mark_hotplug(base, size);
	} while (--rngs);

	return 0;
}
#endif /* CONFIG_PPC_PSERIES */

static int __init early_init_dt_scan_memory_ppc(void)
{
#ifdef CONFIG_PPC_PSERIES
	const void *fdt = initial_boot_params;
	int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");

	if (node > 0)
		walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);

#endif

	return early_init_dt_scan_memory();
}

/*
 * For a relocatable kernel, we need to get the memstart_addr first,
 * then use it to calculate the virtual kernel start address. This has
 * to happen at a very early stage (before machine_init). In this case,
 * we just want to get the memstart_address and would not like to mess
 * with the memblock at this stage. So introduce a variable to skip the
 * memblock_add() for this reason.
 */
#ifdef CONFIG_RELOCATABLE
static int add_mem_to_memblock = 1;
#else
#define add_mem_to_memblock 1
#endif

void __init early_init_dt_add_memory_arch(u64 base, u64 size)
{
#ifdef CONFIG_PPC64
	if (iommu_is_off) {
		if (base >= 0x80000000ul)
			return;
		if ((base + size) > 0x80000000ul)
			size = 0x80000000ul - base;
	}
#endif
	/* Keep track of the beginning of memory -and- the size of
	 * the very first block in the device-tree as it represents
	 * the RMA on ppc64 server
	 */
	if (base < memstart_addr) {
		memstart_addr = base;
		first_memblock_size = size;
	}

	/* Add the chunk to the MEMBLOCK list */
	if (add_mem_to_memblock) {
		if (validate_mem_limit(base, &size))
			memblock_add(base, size);
	}
}

static void __init early_reserve_mem_dt(void)
{
	unsigned long i, dt_root;
	int len;
	const __be32 *prop;

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	dt_root = of_get_flat_dt_root();

	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);

	if (!prop)
		return;

	DBG("Found new-style reserved-ranges\n");

	/* Each reserved range is an (address,size) pair, 2 cells each,
	 * totalling 4 cells per range. */
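	/*
	 * e.g. (hypothetical): reserved-ranges = <0x0 0x12000000 0x0 0x100000>
	 * would reserve 1 MiB starting at physical address 0x12000000.
	 */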
	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
		u64 base, size;

		base = of_read_number(prop + (i * 4) + 0, 2);
		size = of_read_number(prop + (i * 4) + 2, 2);

		if (size) {
			DBG("reserving: %llx -> %llx\n", base, size);
			memblock_reserve(base, size);
		}
	}
}

static void __init early_reserve_mem(void)
{
	__be64 *reserve_map;

	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
			fdt_off_mem_rsvmap(initial_boot_params));

	/* Look for the new "reserved-regions" property in the DT */
	early_reserve_mem_dt();

#ifdef CONFIG_BLK_DEV_INITRD
	/* Then reserve the initrd, if any */
	if (initrd_start && (initrd_end > initrd_start)) {
		memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
			ALIGN(initrd_end, PAGE_SIZE) -
			ALIGN_DOWN(initrd_start, PAGE_SIZE));
	}
#endif /* CONFIG_BLK_DEV_INITRD */

	if (!IS_ENABLED(CONFIG_PPC32))
		return;

	/*
	 * Handle the case where we might be booting from an old kexec
	 * image that setup the mem_rsvmap as pairs of 32-bit values
	 */
	if (be64_to_cpup(reserve_map) > 0xffffffffull) {
		u32 base_32, size_32;
		__be32 *reserve_map_32 = (__be32 *)reserve_map;

		DBG("Found old 32-bit reserve map\n");

		while (1) {
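			/* the map is terminated by a zero-sized entry */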
			base_32 = be32_to_cpup(reserve_map_32++);
			size_32 = be32_to_cpup(reserve_map_32++);
			if (size_32 == 0)
				break;
			DBG("reserving: %x -> %x\n", base_32, size_32);
			memblock_reserve(base_32, size_32);
		}
		return;
	}
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static bool tm_disabled __initdata;

static int __init parse_ppc_tm(char *str)
{
	bool res;

	if (kstrtobool(str, &res))
		return -EINVAL;

	tm_disabled = !res;

	return 0;
}
early_param("ppc_tm", parse_ppc_tm);
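/*
 * e.g. booting with "ppc_tm=off" sets tm_disabled, and tm_init()
 * below then clears the HTM feature bits.
 */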

static void __init tm_init(void)
{
	if (tm_disabled) {
		pr_info("Disabling hardware transactional memory (HTM)\n");
		cur_cpu_spec->cpu_user_features2 &=
			~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
		cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
		return;
	}

	pnv_tm_init();
}
#else
static void tm_init(void) { }
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

static int __init
early_init_dt_scan_model(unsigned long node, const char *uname,
			 int depth, void *data)
{
	const char *prop;

	if (depth != 0)
		return 0;

	prop = of_get_flat_dt_prop(node, "model", NULL);
	if (prop)
		seq_buf_printf(&ppc_hw_desc, "%s ", prop);

	/* break now */
	return 1;
}

#ifdef CONFIG_PPC64
static void __init save_fscr_to_task(void)
{
	/*
	 * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
	 * have configured via the device tree features or via __init_FSCR().
	 * That value will then be propagated to pid 1 (init) and all future
	 * processes.
	 */
	if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
		init_task.thread.fscr = mfspr(SPRN_FSCR);
}
#else
static inline void save_fscr_to_task(void) {}
#endif

void __init early_init_devtree(void *params)
{
	u32 boot_cpu_hwid;
	phys_addr_t limit;

	DBG(" -> early_init_devtree(%px)\n", params);

	/* Too early to BUG_ON(), do it by hand */
	if (!early_init_dt_verify(params))
		panic("BUG: Failed verifying flat device tree, bad version?");

	of_scan_flat_dt(early_init_dt_scan_model, NULL);

#ifdef CONFIG_PPC_RTAS
	/* Some machines might need RTAS info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
#endif

#ifdef CONFIG_PPC_POWERNV
	/* Some machines might need OPAL info for debugging, grab it now. */
	of_scan_flat_dt(early_init_dt_scan_opal, NULL);

	/* Scan tree for ultravisor feature */
	of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
#endif

#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
	/* scan tree to see if dump is active during last boot */
	of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
#endif

	/* Retrieve various information from the /chosen node of the
	 * device-tree, including the platform type, initrd location and
	 * size, TCE reserve, and more ...
	 */
	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);

	/* Scan memory nodes and rebuild MEMBLOCKs */
	early_init_dt_scan_root();
	early_init_dt_scan_memory_ppc();

	/*
	 * As generic code authors expect to be able to use static keys
	 * in early_param() handlers, we initialize the static keys just
	 * before parsing early params (it's fine to call jump_label_init()
	 * more than once).
	 */
	jump_label_init();
	parse_early_param();

	/* make sure we've parsed cmdline for mem= before this */
	if (memory_limit)
		first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
	setup_initial_memory_limit(memstart_addr, first_memblock_size);
	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
	memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
	/* If relocatable, reserve first 32k for interrupt vectors etc. */
	if (PHYSICAL_START > MEMORY_START)
		memblock_reserve(MEMORY_START, 0x8000);
	reserve_kdump_trampoline();
#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
	/*
	 * If we fail to reserve memory for firmware-assisted dump then
	 * fall back to kexec-based kdump.
	 */
	if (fadump_reserve_mem() == 0)
#endif
		reserve_crashkernel();
	early_reserve_mem();

	/* Ensure that total memory size is page-aligned. */
	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
	memblock_enforce_memory_limit(limit);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
	if (!early_radix_enabled())
		memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
#endif

	memblock_allow_resize();
	memblock_dump_all();

	DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());

	/* We may need to relocate the flat tree, do it now.
	 * FIXME .. and the initrd too? */
	move_device_tree();

	DBG("Scanning CPUs ...\n");

	dt_cpu_ftrs_scan();

	// We can now add the CPU name & PVR to the hardware description
	seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));

	/* Retrieve CPU-related information from the flat tree
	 * (altivec support, boot CPU ID, ...)
	 */
	of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
	if (boot_cpuid < 0) {
		printk("Failed to identify boot CPU !\n");
		BUG();
	}

	save_fscr_to_task();

#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
	/* We'll later wait for secondaries to check in; there are
	 * NCPUS-1 non-boot CPUs  :-)
	 */
	spinning_secondaries = boot_cpu_count - 1;
#endif

	mmu_early_init_devtree();

	// NB. paca is not installed until later in early_setup()
	allocate_paca_ptrs();
	allocate_paca(boot_cpuid);
	set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);

#ifdef CONFIG_PPC_POWERNV
	/* Scan and build the list of machine check recoverable ranges */
	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
#endif
	epapr_paravirt_early_init();

	/* Now try to figure out if we are running on LPAR and so on */
	pseries_probe_fw_features();

	/*
	 * Initialize pkey features and default AMR/IAMR values
	 */
	pkey_early_init_devtree();

#ifdef CONFIG_PPC_PS3
	/* Identify PS3 firmware */
	if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
		powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
#endif

	tm_init();

	DBG(" <- early_init_devtree()\n");
}

#ifdef CONFIG_RELOCATABLE
/*
 * This function runs before early_init_devtree, so we have to init
 * initial_boot_params ourselves.
 */
void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
{
	/* Setup flat device-tree pointer */
	initial_boot_params = params;

	/*
	 * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
	 * messing up the memblock.
	 */
	add_mem_to_memblock = 0;
	early_init_dt_scan_root();
	early_init_dt_scan_memory_ppc();
	add_mem_to_memblock = 1;

	if (size)
		*size = first_memblock_size;
}
#endif

/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done.  The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
 * @np: device node of the device
 *
 * This looks for a property "ibm,chip-id" in the node or any
 * of its parents and returns its content, or -1 if it cannot
 * be found.
 */
int of_get_ibm_chip_id(struct device_node *np)
{
	of_node_get(np);
	while (np) {
		u32 chip_id;

		/*
		 * Skiboot may produce memory nodes that contain more than one
		 * cell in chip-id, we only read the first one here.
		 */
		if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
			of_node_put(np);
			return chip_id;
		}

		np = of_get_next_parent(np);
	}
	return -1;
}
EXPORT_SYMBOL(of_get_ibm_chip_id);

/**
 * cpu_to_chip_id - Return the cpu's chip-id
 * @cpu: The logical cpu number.
 *
 * Return the value of the ibm,chip-id property corresponding to the given
 * logical cpu number. If the chip-id cannot be found, returns -1.
 */
int cpu_to_chip_id(int cpu)
{
	struct device_node *np;
	int ret = -1, idx;

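	/* one cache slot per core; all threads of a core share a chip-id */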
	idx = cpu / threads_per_core;
	if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
		return chip_id_lookup_table[idx];

	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		ret = of_get_ibm_chip_id(np);
		of_node_put(np);

		if (chip_id_lookup_table)
			chip_id_lookup_table[idx] = ret;
	}

	return ret;
}
EXPORT_SYMBOL(cpu_to_chip_id);

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
#ifdef CONFIG_SMP
	/*
	 * Early firmware scanning must use this rather than
	 * get_hard_smp_processor_id because we don't have pacas allocated
	 * until memory topology is discovered.
	 */
	if (cpu_to_phys_id != NULL)
		return (int)phys_id == cpu_to_phys_id[cpu];
#endif

	return (int)phys_id == get_hard_smp_processor_id(cpu);
}