v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Procedures for creating, accessing and interpreting the device tree.
   4 *
   5 * Paul Mackerras	August 1996.
   6 * Copyright (C) 1996-2005 Paul Mackerras.
   7 * 
   8 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
   9 *    {engebret|bergner}@us.ibm.com 
  10 */
  11
  12#undef DEBUG
  13
  14#include <linux/kernel.h>
  15#include <linux/string.h>
  16#include <linux/init.h>
  17#include <linux/threads.h>
  18#include <linux/spinlock.h>
  19#include <linux/types.h>
  20#include <linux/pci.h>
  21#include <linux/delay.h>
  22#include <linux/initrd.h>
  23#include <linux/bitops.h>
  24#include <linux/export.h>
  25#include <linux/kexec.h>
  26#include <linux/irq.h>
  27#include <linux/memblock.h>
  28#include <linux/of.h>
  29#include <linux/of_fdt.h>
  30#include <linux/libfdt.h>
  31#include <linux/cpu.h>
  32#include <linux/pgtable.h>
  33#include <linux/seq_buf.h>
  34
  35#include <asm/rtas.h>
  36#include <asm/page.h>
  37#include <asm/processor.h>
  38#include <asm/irq.h>
  39#include <asm/io.h>
  40#include <asm/kdump.h>
  41#include <asm/smp.h>
  42#include <asm/mmu.h>
  43#include <asm/paca.h>
  44#include <asm/powernv.h>
  45#include <asm/iommu.h>
  46#include <asm/btext.h>
  47#include <asm/sections.h>
  48#include <asm/setup.h>
  49#include <asm/pci-bridge.h>
  50#include <asm/kexec.h>
  51#include <asm/opal.h>
  52#include <asm/fadump.h>
  53#include <asm/epapr_hcalls.h>
  54#include <asm/firmware.h>
  55#include <asm/dt_cpu_ftrs.h>
  56#include <asm/drmem.h>
  57#include <asm/ultravisor.h>
  58#include <asm/prom.h>
  59
  60#include <mm/mmu_decl.h>
  61
  62#ifdef DEBUG
  63#define DBG(fmt...) printk(KERN_ERR fmt)
  64#else
  65#define DBG(fmt...)
  66#endif
  67
  68int *chip_id_lookup_table;
  69
  70#ifdef CONFIG_PPC64
  71int __initdata iommu_is_off;
  72int __initdata iommu_force_on;
  73unsigned long tce_alloc_start, tce_alloc_end;
  74u64 ppc64_rma_size;
  75unsigned int boot_cpu_node_count __ro_after_init;
  76#endif
  77static phys_addr_t first_memblock_size;
  78static int __initdata boot_cpu_count;
  79
  80static int __init early_parse_mem(char *p)
  81{
  82	if (!p)
  83		return 1;
  84
  85	memory_limit = PAGE_ALIGN(memparse(p, &p));
  86	DBG("memory limit = 0x%llx\n", memory_limit);
  87
  88	return 0;
  89}
  90early_param("mem", early_parse_mem);
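     /* Example: booting with mem=512M parses to memory_limit = 0x20000000;
      * memparse() accepts the usual K/M/G suffixes and PAGE_ALIGN() rounds
      * the result up to a page boundary. */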
  91
  92/*
  93 * overlaps_initrd - check for overlap with page aligned extension of
  94 * initrd.
  95 */
  96static inline int overlaps_initrd(unsigned long start, unsigned long size)
  97{
  98#ifdef CONFIG_BLK_DEV_INITRD
  99	if (!initrd_start)
 100		return 0;
 101
 102	return	(start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
 103			start <= ALIGN(initrd_end, PAGE_SIZE);
 104#else
 105	return 0;
 106#endif
 107}
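     /* Example (hypothetical values): with 4K pages, initrd_start = 0x2000400
      * and initrd_end = 0x2100400 give the page-aligned window
      * [0x2000000, 0x2101000]; a (start, size) range intersecting that
      * window counts as an overlap. */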
 108
 109/**
 110 * move_device_tree - move tree to an unused area, if needed.
 111 *
 112 * The device tree may be allocated beyond our memory limit, or inside the
 113 * crash kernel region for kdump, or within the page aligned range of initrd.
 114 * If so, move it out of the way.
 115 */
 116static void __init move_device_tree(void)
 117{
 118	unsigned long start, size;
 119	void *p;
 120
 121	DBG("-> move_device_tree\n");
 122
 123	start = __pa(initial_boot_params);
 124	size = fdt_totalsize(initial_boot_params);
 125
 126	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
 127	    !memblock_is_memory(start + size - 1) ||
 128	    overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
 129		p = memblock_alloc_raw(size, PAGE_SIZE);
 130		if (!p)
 131			panic("Failed to allocate %lu bytes to move device tree\n",
 132			      size);
 133		memcpy(p, initial_boot_params, size);
 134		initial_boot_params = p;
 135		DBG("Moved device tree to 0x%px\n", p);
 136	}
 137
 138	DBG("<- move_device_tree\n");
 139}
 140
 141/*
 142 * ibm,pa/pi-features is a per-cpu property that contains a string of
 143 * attribute descriptors, each of which has a 2 byte header plus up
 144 * to 254 bytes worth of processor attribute bits.  First header
 145 * byte specifies the number of bytes following the header.
 146 * Second header byte is an "attribute-specifier" type, of which
 147 * zero is the only currently-defined value.
 148 * Implementation:  Pass in the byte and bit offset for the feature
 149 * that we are interested in.  The function will return -1 if the
 150 * pa-features property is missing, or a 1/0 to indicate if the feature
 151 * is supported/not supported.  Note that the bit numbers are
 152 * big-endian to match the definition in PAPR.
 153 */
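     /*
      * Worked example (hypothetical property contents): the descriptor
      *   { 0x02, 0x00, 0xc0, 0x80 }
      * carries 2 attribute bytes after its 2-byte header and has type 0.
      * Attribute byte 0 is 0xc0 (binary 11000000), so pabyte = 0 with
      * pabit = 1 extracts (0xc0 >> (7 - 1)) & 1 = 1, i.e. big-endian bit 1
      * (PPC_FEATURE_HAS_FPU in the table below) is reported as supported.
      */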
 154struct ibm_feature {
 155	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
 156	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
 157	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
 158	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
 159	unsigned char	pabyte;		/* byte number in ibm,pa/pi-features */
 160	unsigned char	pabit;		/* bit number (big-endian) */
 161	unsigned char	invert;		/* if 1, pa bit set => clear feature */
 162};
 163
 164static struct ibm_feature ibm_pa_features[] __initdata = {
 165	{ .pabyte = 0,  .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
 166	{ .pabyte = 0,  .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
 167	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
 168	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
 169	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
 170#ifdef CONFIG_PPC_RADIX_MMU
 171	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
 172#endif
 173	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
 174				    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
 175	/*
 176	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
 177	 * we don't want to turn on TM here, so we use the *_COMP versions
 178	 * which are 0 if the kernel doesn't support TM.
 179	 */
 180	{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
 181	  .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },
 182
 183	{ .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
 184};
 185
 186/*
 187 * ibm,pi-features property provides the support of processor specific
 188 * options not described in ibm,pa-features. Right now we use byte 0, bit 3,
 189 * which indicates the occurrence of a DSI interrupt when a paste operation
 190 * is performed on a suspended NX window.
 191 */
 192static struct ibm_feature ibm_pi_features[] __initdata = {
 193	{ .pabyte = 0, .pabit = 3, .mmu_features  = MMU_FTR_NX_DSI },
 194};
 195
 196static void __init scan_features(unsigned long node, const unsigned char *ftrs,
 197				 unsigned long tablelen,
 198				 struct ibm_feature *fp,
 199				 unsigned long ft_size)
 200{
 201	unsigned long i, len, bit;
 202
 203	/* find descriptor with type == 0 */
 204	for (;;) {
 205		if (tablelen < 3)
 206			return;
 207		len = 2 + ftrs[0];
 208		if (tablelen < len)
 209			return;		/* descriptor 0 not found */
 210		if (ftrs[1] == 0)
 211			break;
 212		tablelen -= len;
 213		ftrs += len;
 214	}
 215
 216	/* loop over bits we know about */
 217	for (i = 0; i < ft_size; ++i, ++fp) {
 218		if (fp->pabyte >= ftrs[0])
 219			continue;
 220		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
 221		if (bit ^ fp->invert) {
 222			cur_cpu_spec->cpu_features |= fp->cpu_features;
 223			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
 224			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
 225			cur_cpu_spec->mmu_features |= fp->mmu_features;
 226		} else {
 227			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
 228			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
 229			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
 230			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
 231		}
 232	}
 233}
 234
 235static void __init check_cpu_features(unsigned long node, char *name,
 236				      struct ibm_feature *fp,
 237				      unsigned long size)
 238{
 239	const unsigned char *pa_ftrs;
 240	int tablelen;
 241
 242	pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
 243	if (pa_ftrs == NULL)
 244		return;
 245
 246	scan_features(node, pa_ftrs, tablelen, fp, size);
 247}
 248
 249#ifdef CONFIG_PPC_64S_HASH_MMU
 250static void __init init_mmu_slb_size(unsigned long node)
 251{
 252	const __be32 *slb_size_ptr;
 253
 254	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
 255			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
 256
 257	if (slb_size_ptr)
 258		mmu_slb_size = be32_to_cpup(slb_size_ptr);
 259}
 260#else
 261#define init_mmu_slb_size(node) do { } while(0)
 262#endif
 263
 264static struct feature_property {
 265	const char *name;
 266	u32 min_value;
 267	unsigned long cpu_feature;
 268	unsigned long cpu_user_ftr;
 269} feature_properties[] __initdata = {
 270#ifdef CONFIG_ALTIVEC
 271	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
 272	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
 273#endif /* CONFIG_ALTIVEC */
 274#ifdef CONFIG_VSX
 275	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
 276	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
 277#endif /* CONFIG_VSX */
 278#ifdef CONFIG_PPC64
 279	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
 280	{"ibm,purr", 1, CPU_FTR_PURR, 0},
 281	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
 282#endif /* CONFIG_PPC64 */
 283};
 284
 285#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
 286static __init void identical_pvr_fixup(unsigned long node)
 287{
 288	unsigned int pvr;
 289	const char *model = of_get_flat_dt_prop(node, "model", NULL);
 290
 291	/*
 292	 * Since 440GR(x)/440EP(x) processors have the same pvr,
 293	 * we check the node path and set bit 28 in the cur_cpu_spec
 294	 * pvr for EP(x) processor version. This bit is always 0 in
 295	 * the "real" pvr. Then we call identify_cpu again with
 296	 * the new logical pvr to enable FPU support.
 297	 */
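	/* Note: "bit 28" uses IBM MSB-0 numbering, so it corresponds to the
	 * mask 1 << (31 - 28) = 0x8 that is OR'd into pvr_value below. */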
 298	if (model && strstr(model, "440EP")) {
 299		pvr = cur_cpu_spec->pvr_value | 0x8;
 300		identify_cpu(0, pvr);
 301		DBG("Using logical pvr %x for %s\n", pvr, model);
 302	}
 303}
 304#else
 305#define identical_pvr_fixup(node) do { } while(0)
 306#endif
 307
 308static void __init check_cpu_feature_properties(unsigned long node)
 309{
 310	int i;
 311	struct feature_property *fp = feature_properties;
 312	const __be32 *prop;
 313
 314	for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
 315		prop = of_get_flat_dt_prop(node, fp->name, NULL);
 316		if (prop && be32_to_cpup(prop) >= fp->min_value) {
 317			cur_cpu_spec->cpu_features |= fp->cpu_feature;
 318			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
 319		}
 320	}
 321}
 322
 323static int __init early_init_dt_scan_cpus(unsigned long node,
 324					  const char *uname, int depth,
 325					  void *data)
 326{
 327	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 328	const __be32 *prop;
 329	const __be32 *intserv;
 330	int i, nthreads;
 331	int len;
 332	int found = -1;
 333	int found_thread = 0;
 334
 335	/* We are scanning "cpu" nodes only */
 336	if (type == NULL || strcmp(type, "cpu") != 0)
 337		return 0;
 338
 339	if (IS_ENABLED(CONFIG_PPC64))
 340		boot_cpu_node_count++;
 341
 342	/* Get physical cpuid */
 343	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
 344	if (!intserv)
 345		intserv = of_get_flat_dt_prop(node, "reg", &len);
 346
 347	nthreads = len / sizeof(int);
 348
 349	/*
 350	 * Now see if any of these threads match our boot cpu.
 351	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
 352	 */
 353	for (i = 0; i < nthreads; i++) {
 354		if (be32_to_cpu(intserv[i]) ==
 355			fdt_boot_cpuid_phys(initial_boot_params)) {
 356			found = boot_cpu_count;
 357			found_thread = i;
 358		}
 359#ifdef CONFIG_SMP
 360		/* logical cpu id is always 0 on UP kernels */
 361		boot_cpu_count++;
 362#endif
 363	}
 364
 365	/* Not the boot CPU */
 366	if (found < 0)
 367		return 0;
 368
 369	DBG("boot cpu: logical %d physical %d\n", found,
 370	    be32_to_cpu(intserv[found_thread]));
 371	boot_cpuid = found;
 372
 373	// Pass the boot CPU's hard CPU id back to our caller
 374	*((u32 *)data) = be32_to_cpu(intserv[found_thread]);
 375
 376	/*
 377	 * PAPR defines "logical" PVR values for cpus that
 378	 * meet various levels of the architecture:
 379	 * 0x0f000001	Architecture version 2.04
 380	 * 0x0f000002	Architecture version 2.05
 381	 * If the cpu-version property in the cpu node contains
 382	 * such a value, we call identify_cpu again with the
 383	 * logical PVR value in order to use the cpu feature
 384	 * bits appropriate for the architecture level.
 385	 *
 386	 * A POWER6 partition in "POWER6 architected" mode
 387	 * uses the 0x0f000002 PVR value; in POWER5+ mode
 388	 * it uses 0x0f000001.
 389	 *
 390	 * If we're using device tree CPU feature discovery then we don't
 391	 * support the cpu-version property, and it's the responsibility of the
 392	 * firmware/hypervisor to provide the correct feature set for the
 393	 * architecture level via the ibm,powerpc-cpu-features binding.
 394	 */
 395	if (!dt_cpu_ftrs_in_use()) {
 396		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
 397		if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
 398			identify_cpu(0, be32_to_cpup(prop));
 399			seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(prop));
 400		}
 401
 402		check_cpu_feature_properties(node);
 403		check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
 404				   ARRAY_SIZE(ibm_pa_features));
 405		check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
 406				   ARRAY_SIZE(ibm_pi_features));
 407	}
 408
 409	identical_pvr_fixup(node);
 410	init_mmu_slb_size(node);
 411
 412#ifdef CONFIG_PPC64
 413	if (nthreads == 1)
 414		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
 415	else if (!dt_cpu_ftrs_in_use())
 416		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
 417#endif
 418
 419	return 0;
 420}
 421
 422static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
 423						const char *uname,
 424						int depth, void *data)
 425{
 426	const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
 427
 428	/* Use common scan routine to determine if this is the chosen node */
 429	if (early_init_dt_scan_chosen(data) < 0)
 430		return 0;
 431
 432#ifdef CONFIG_PPC64
 433	/* check if iommu is forced on or off */
 434	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
 435		iommu_is_off = 1;
 436	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
 437		iommu_force_on = 1;
 438#endif
 439
 440	/* mem=x on the command line is the preferred mechanism */
 441	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
 442	if (lprop)
 443		memory_limit = *lprop;
 444
 445#ifdef CONFIG_PPC64
 446	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
 447	if (lprop)
 448		tce_alloc_start = *lprop;
 449	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
 450	if (lprop)
 451		tce_alloc_end = *lprop;
 452#endif
 453
 454#ifdef CONFIG_KEXEC_CORE
 455	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
 456	if (lprop)
 457		crashk_res.start = *lprop;
 458
 459	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
 460	if (lprop)
 461		crashk_res.end = crashk_res.start + *lprop - 1;
 462#endif
 463
 464	/* break now */
 465	return 1;
 466}
 467
 468/*
 469 * Compare the range against max mem limit and update
 470 * size if it crosses the limit.
 471 */
 472
 473#ifdef CONFIG_SPARSEMEM
 474static bool __init validate_mem_limit(u64 base, u64 *size)
 475{
 476	u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
 477
 478	if (base >= max_mem)
 479		return false;
 480	if ((base + *size) > max_mem)
 481		*size = max_mem - base;
 482	return true;
 483}
 484#else
 485static bool __init validate_mem_limit(u64 base, u64 *size)
 486{
 487	return true;
 488}
 489#endif
 490
 491#ifdef CONFIG_PPC_PSERIES
 492/*
 493 * Interpret the ibm dynamic reconfiguration memory LMBs.
 494 * This contains a list of memory blocks along with NUMA affinity
 495 * information.
 496 */
 497static int  __init early_init_drmem_lmb(struct drmem_lmb *lmb,
 498					const __be32 **usm,
 499					void *data)
 500{
 501	u64 base, size;
 502	int is_kexec_kdump = 0, rngs;
 503
 504	base = lmb->base_addr;
 505	size = drmem_lmb_size();
 506	rngs = 1;
 507
 508	/*
 509	 * Skip this block if the reserved bit is set in flags
 510	 * or if the block is not assigned to this partition.
 511	 */
 512	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
 513	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
 514		return 0;
 515
 516	if (*usm)
 517		is_kexec_kdump = 1;
 518
 519	if (is_kexec_kdump) {
 520		/*
 521		 * For each memblock in ibm,dynamic-memory, a
 522		 * corresponding entry in linux,drconf-usable-memory
 523		 * property contains a counter 'p' followed by 'p'
 524 * (base, size) duples. Now read the counter from the
 525		 * linux,drconf-usable-memory property
 526		 */
 527		rngs = dt_mem_next_cell(dt_root_size_cells, usm);
 528		if (!rngs) /* there are no (base, size) duples */
 529			return 0;
 530	}
 531
 532	do {
 533		if (is_kexec_kdump) {
 534			base = dt_mem_next_cell(dt_root_addr_cells, usm);
 535			size = dt_mem_next_cell(dt_root_size_cells, usm);
 536		}
 537
 538		if (iommu_is_off) {
 539			if (base >= 0x80000000ul)
 540				continue;
 541			if ((base + size) > 0x80000000ul)
 542				size = 0x80000000ul - base;
 543		}
 544
 545		if (!validate_mem_limit(base, &size))
 546			continue;
 547
 548		DBG("Adding: %llx -> %llx\n", base, size);
 549		memblock_add(base, size);
 550
 551		if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
 552			memblock_mark_hotplug(base, size);
 553	} while (--rngs);
 554
 555	return 0;
 556}
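     /*
      * Example (hypothetical cell values): with dt_root_addr_cells = 2 and
      * dt_root_size_cells = 2, a linux,drconf-usable-memory entry of
      *   <0x0 0x1  0x0 0x20000000  0x0 0x10000000>
      * encodes one usable range: base 0x20000000, size 0x10000000 (256 MB).
      */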
 557#endif /* CONFIG_PPC_PSERIES */
 558
 559static int __init early_init_dt_scan_memory_ppc(void)
 560{
 561#ifdef CONFIG_PPC_PSERIES
 562	const void *fdt = initial_boot_params;
 563	int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
 564
 565	if (node > 0)
 566		walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
 567
 568#endif
 569
 570	return early_init_dt_scan_memory();
 571}
 572
 573/*
 574 * For a relocatable kernel, we need to get the memstart_addr first,
 575 * then use it to calculate the virtual kernel start address. This has
 576 * to happen at a very early stage (before machine_init). In this case,
 577 * we just want to get the memstart_addr and would not like to mess with
 578 * the memblock at this stage, so we introduce a variable to skip the
 579 * memblock_add() in that case.
 580 */
 581#ifdef CONFIG_RELOCATABLE
 582static int add_mem_to_memblock = 1;
 583#else
 584#define add_mem_to_memblock 1
 585#endif
 586
 587void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 588{
 589#ifdef CONFIG_PPC64
 590	if (iommu_is_off) {
 591		if (base >= 0x80000000ul)
 592			return;
 593		if ((base + size) > 0x80000000ul)
 594			size = 0x80000000ul - base;
 595	}
 596#endif
 597	/* Keep track of the beginning of memory -and- the size of
 598	 * the very first block in the device-tree as it represents
 599	 * the RMA on ppc64 server
 600	 */
 601	if (base < memstart_addr) {
 602		memstart_addr = base;
 603		first_memblock_size = size;
 604	}
 605
 606	/* Add the chunk to the MEMBLOCK list */
 607	if (add_mem_to_memblock) {
 608		if (validate_mem_limit(base, &size))
 609			memblock_add(base, size);
 610	}
 611}
 612
 613static void __init early_reserve_mem_dt(void)
 614{
 615	unsigned long i, dt_root;
 616	int len;
 617	const __be32 *prop;
 618
 619	early_init_fdt_reserve_self();
 620	early_init_fdt_scan_reserved_mem();
 621
 622	dt_root = of_get_flat_dt_root();
 623
 624	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
 625
 626	if (!prop)
 627		return;
 628
 629	DBG("Found new-style reserved-ranges\n");
 630
 631	/* Each reserved range is an (address,size) pair, 2 cells each,
 632	 * totalling 4 cells per range. */
 633	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
 634		u64 base, size;
 635
 636		base = of_read_number(prop + (i * 4) + 0, 2);
 637		size = of_read_number(prop + (i * 4) + 2, 2);
 638
 639		if (size) {
 640			DBG("reserving: %llx -> %llx\n", base, size);
 641			memblock_reserve(base, size);
 642		}
 643	}
 644}
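     /*
      * Example (hypothetical): reserved-ranges = <0x0 0x10000000 0x0 0x1000000>
      * describes a single range reserving 16 MB at 256 MB; of_read_number()
      * folds each pair of cells into one 64-bit value.
      */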
 645
 646static void __init early_reserve_mem(void)
 647{
 648	__be64 *reserve_map;
 649
 650	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
 651			fdt_off_mem_rsvmap(initial_boot_params));
 652
 653	/* Look for the new "reserved-ranges" property in the DT */
 654	early_reserve_mem_dt();
 655
 656#ifdef CONFIG_BLK_DEV_INITRD
 657	/* Then reserve the initrd, if any */
 658	if (initrd_start && (initrd_end > initrd_start)) {
 659		memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
 660			ALIGN(initrd_end, PAGE_SIZE) -
 661			ALIGN_DOWN(initrd_start, PAGE_SIZE));
 662	}
 663#endif /* CONFIG_BLK_DEV_INITRD */
 664
 665	if (!IS_ENABLED(CONFIG_PPC32))
 666		return;
 667
 668	/* 
 669	 * Handle the case where we might be booting from an old kexec
 670	 * image that set up the mem_rsvmap as pairs of 32-bit values
 671	 */
 672	if (be64_to_cpup(reserve_map) > 0xffffffffull) {
 673		u32 base_32, size_32;
 674		__be32 *reserve_map_32 = (__be32 *)reserve_map;
 675
 676		DBG("Found old 32-bit reserve map\n");
 677
 678		while (1) {
 679			base_32 = be32_to_cpup(reserve_map_32++);
 680			size_32 = be32_to_cpup(reserve_map_32++);
 681			if (size_32 == 0)
 682				break;
 683			DBG("reserving: %x -> %x\n", base_32, size_32);
 684			memblock_reserve(base_32, size_32);
 685		}
 686		return;
 687	}
 688}
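     /*
      * Example: a 32-bit map beginning { 0x01000000, 0x00100000, ... } reads
      * as the big-endian 64-bit value 0x0100000000100000 > 0xffffffff, which
      * is how the old packed layout is detected above.
      */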
 689
 690#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 691static bool tm_disabled __initdata;
 692
 693static int __init parse_ppc_tm(char *str)
 694{
 695	bool res;
 696
 697	if (kstrtobool(str, &res))
 698		return -EINVAL;
 699
 700	tm_disabled = !res;
 701
 702	return 0;
 703}
 704early_param("ppc_tm", parse_ppc_tm);
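     /* Example: booting with ppc_tm=off parses to false, so tm_disabled
      * becomes true and tm_init() below clears the TM feature bits. */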
 705
 706static void __init tm_init(void)
 707{
 708	if (tm_disabled) {
 709		pr_info("Disabling hardware transactional memory (HTM)\n");
 710		cur_cpu_spec->cpu_user_features2 &=
 711			~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
 712		cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
 713		return;
 714	}
 715
 716	pnv_tm_init();
 717}
 718#else
 719static void tm_init(void) { }
 720#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 721
 722static int __init
 723early_init_dt_scan_model(unsigned long node, const char *uname,
 724			 int depth, void *data)
 725{
 726	const char *prop;
 727
 728	if (depth != 0)
 729		return 0;
 730
 731	prop = of_get_flat_dt_prop(node, "model", NULL);
 732	if (prop)
 733		seq_buf_printf(&ppc_hw_desc, "%s ", prop);
 734
 735	/* break now */
 736	return 1;
 737}
 738
 739#ifdef CONFIG_PPC64
 740static void __init save_fscr_to_task(void)
 741{
 742	/*
 743	 * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
 744	 * have configured via the device tree features or via __init_FSCR().
 745	 * That value will then be propagated to pid 1 (init) and all future
 746	 * processes.
 747	 */
 748	if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
 749		init_task.thread.fscr = mfspr(SPRN_FSCR);
 750}
 751#else
 752static inline void save_fscr_to_task(void) {}
 753#endif
 754
 755
 756void __init early_init_devtree(void *params)
 757{
 758	u32 boot_cpu_hwid;
 759	phys_addr_t limit;
 760
 761	DBG(" -> early_init_devtree(%px)\n", params);
 762
 763	/* Too early to BUG_ON(), do it by hand */
 764	if (!early_init_dt_verify(params))
 765		panic("BUG: Failed verifying flat device tree, bad version?");
 766
 767	of_scan_flat_dt(early_init_dt_scan_model, NULL);
 768
 769#ifdef CONFIG_PPC_RTAS
 770	/* Some machines might need RTAS info for debugging, grab it now. */
 771	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
 772#endif
 773
 774#ifdef CONFIG_PPC_POWERNV
 775	/* Some machines might need OPAL info for debugging, grab it now. */
 776	of_scan_flat_dt(early_init_dt_scan_opal, NULL);
 777
 778	/* Scan tree for ultravisor feature */
 779	of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
 780#endif
 781
 782#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
 783	/* scan tree to see if dump is active during last boot */
 784	of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
 785#endif
 786
 787	/* Retrieve various information from the /chosen node of the
 788	 * device-tree, including the platform type, initrd location and
 789	 * size, TCE reserve, and more ...
 790	 */
 791	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
 792
 793	/* Scan memory nodes and rebuild MEMBLOCKs */
 794	early_init_dt_scan_root();
 795	early_init_dt_scan_memory_ppc();
 796
 797	/*
 798	 * As generic code authors expect to be able to use static keys
 799	 * in early_param() handlers, we initialize the static keys just
 800	 * before parsing early params (it's fine to call jump_label_init()
 801	 * more than once).
 802	 */
 803	jump_label_init();
 804	parse_early_param();
 805
 806	/* make sure we've parsed cmdline for mem= before this */
 807	if (memory_limit)
 808		first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
 809	setup_initial_memory_limit(memstart_addr, first_memblock_size);
 810	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
 811	memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
 812	/* If relocatable, reserve first 32k for interrupt vectors etc. */
 813	if (PHYSICAL_START > MEMORY_START)
 814		memblock_reserve(MEMORY_START, 0x8000);
 815	reserve_kdump_trampoline();
 816#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
 817	/*
 818	 * If we fail to reserve memory for firmware-assisted dump then
 819	 * fallback to kexec based kdump.
 820	 */
 821	if (fadump_reserve_mem() == 0)
 822#endif
 823		reserve_crashkernel();
 824	early_reserve_mem();
 825
 826	/* Ensure that total memory size is page-aligned. */
 827	limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
 828	memblock_enforce_memory_limit(limit);
 829
 830#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
 831	if (!early_radix_enabled())
 832		memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
 833#endif
 834
 835	memblock_allow_resize();
 836	memblock_dump_all();
 837
 838	DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());
 839
 840	/* We may need to relocate the flat tree, do it now.
 841	 * FIXME .. and the initrd too? */
 842	move_device_tree();
 843
 844	DBG("Scanning CPUs ...\n");
 845
 846	dt_cpu_ftrs_scan();
 847
 848	// We can now add the CPU name & PVR to the hardware description
 849	seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
 850
 851	/* Retrieve CPU-related information from the flat tree
 852	 * (altivec support, boot CPU ID, ...)
 853	 */
 854	of_scan_flat_dt(early_init_dt_scan_cpus, &boot_cpu_hwid);
 855	if (boot_cpuid < 0) {
 856		printk("Failed to identify boot CPU !\n");
 857		BUG();
 858	}
 859
 860	save_fscr_to_task();
 861
 862#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
 863	/* We'll later wait for secondaries to check in; there are
 864	 * NCPUS-1 non-boot CPUs  :-)
 865	 */
 866	spinning_secondaries = boot_cpu_count - 1;
 867#endif
 868
 869	mmu_early_init_devtree();
 870
 871	// NB. paca is not installed until later in early_setup()
 872	allocate_paca_ptrs();
 873	allocate_paca(boot_cpuid);
 874	set_hard_smp_processor_id(boot_cpuid, boot_cpu_hwid);
 875
 876#ifdef CONFIG_PPC_POWERNV
 877	/* Scan and build the list of machine check recoverable ranges */
 878	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
 879#endif
 880	epapr_paravirt_early_init();
 881
 882	/* Now try to figure out if we are running on LPAR and so on */
 883	pseries_probe_fw_features();
 884
 885	/*
 886	 * Initialize pkey features and default AMR/IAMR values
 887	 */
 888	pkey_early_init_devtree();
 889
 890#ifdef CONFIG_PPC_PS3
 891	/* Identify PS3 firmware */
 892	if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
 893		powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
 894#endif
 895
 896	tm_init();
 897
 898	DBG(" <- early_init_devtree()\n");
 899}
 900
 901#ifdef CONFIG_RELOCATABLE
 902/*
 903 * This function runs before early_init_devtree, so we have to init
 904 * initial_boot_params.
 905 */
 906void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
 907{
 908	/* Setup flat device-tree pointer */
 909	initial_boot_params = params;
 910
 911	/*
 912	 * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
 913	 * messing with the memblock.
 914	 */
 915	add_mem_to_memblock = 0;
 916	early_init_dt_scan_root();
 917	early_init_dt_scan_memory_ppc();
 918	add_mem_to_memblock = 1;
 919
 920	if (size)
 921		*size = first_memblock_size;
 922}
 923#endif
 924
 925/*******
 926 *
 927 * New implementation of the OF "find" APIs, return a refcounted
 928 * object, call of_node_put() when done.  The device tree and list
 929 * are protected by a rw_lock.
 930 *
 931 * Note that property management will need some locking as well,
 932 * this isn't dealt with yet.
 933 *
 934 *******/
 935
 936/**
 937 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
 938 * @np: device node of the device
 939 *
 940 * This looks for a property "ibm,chip-id" in the node or any
 941 * of its parents and returns its content, or -1 if it cannot
 942 * be found.
 943 */
 944int of_get_ibm_chip_id(struct device_node *np)
 945{
 946	of_node_get(np);
 947	while (np) {
 948		u32 chip_id;
 949
 950		/*
 951		 * Skiboot may produce memory nodes that contain more than one
 952		 * cell in chip-id, we only read the first one here.
 953		 */
 954		if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
 955			of_node_put(np);
 956			return chip_id;
 957		}
 958
 959		np = of_get_next_parent(np);
 960	}
 961	return -1;
 962}
 963EXPORT_SYMBOL(of_get_ibm_chip_id);
 964
 965/**
 966 * cpu_to_chip_id - Return the cpu's chip-id
 967 * @cpu: The logical cpu number.
 968 *
 969 * Return the value of the ibm,chip-id property corresponding to the given
 970 * logical cpu number. If the chip-id cannot be found, returns -1.
 971 */
 972int cpu_to_chip_id(int cpu)
 973{
 974	struct device_node *np;
 975	int ret = -1, idx;
 976
 977	idx = cpu / threads_per_core;
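	/* Example: with threads_per_core = 8, cpus 8-15 all yield idx = 1,
	 * so the threads of one core share a single cached chip-id entry. */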
 978	if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
 979		return chip_id_lookup_table[idx];
 980
 981	np = of_get_cpu_node(cpu, NULL);
 982	if (np) {
 983		ret = of_get_ibm_chip_id(np);
 984		of_node_put(np);
 985
 986		if (chip_id_lookup_table)
 987			chip_id_lookup_table[idx] = ret;
 988	}
 989
 990	return ret;
 991}
 992EXPORT_SYMBOL(cpu_to_chip_id);
 993
 994bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
 995{
 996#ifdef CONFIG_SMP
 997	/*
 998	 * Early firmware scanning must use this rather than
 999	 * get_hard_smp_processor_id because we don't have pacas allocated
1000	 * until memory topology is discovered.
1001	 */
1002	if (cpu_to_phys_id != NULL)
1003		return (int)phys_id == cpu_to_phys_id[cpu];
1004#endif
1005
1006	return (int)phys_id == get_hard_smp_processor_id(cpu);
1007}
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Procedures for creating, accessing and interpreting the device tree.
   4 *
   5 * Paul Mackerras	August 1996.
   6 * Copyright (C) 1996-2005 Paul Mackerras.
   7 * 
   8 *  Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
   9 *    {engebret|bergner}@us.ibm.com 
  10 */
  11
  12#undef DEBUG
  13
  14#include <linux/kernel.h>
  15#include <linux/string.h>
  16#include <linux/init.h>
  17#include <linux/threads.h>
  18#include <linux/spinlock.h>
  19#include <linux/types.h>
  20#include <linux/pci.h>
  21#include <linux/delay.h>
  22#include <linux/initrd.h>
  23#include <linux/bitops.h>
  24#include <linux/export.h>
  25#include <linux/kexec.h>
  26#include <linux/irq.h>
  27#include <linux/memblock.h>
  28#include <linux/of.h>
  29#include <linux/of_fdt.h>
  30#include <linux/libfdt.h>
  31#include <linux/cpu.h>
  32#include <linux/pgtable.h>
  33#include <linux/seq_buf.h>
  34
  35#include <asm/rtas.h>
  36#include <asm/page.h>
  37#include <asm/processor.h>
  38#include <asm/irq.h>
  39#include <asm/io.h>
  40#include <asm/kdump.h>
  41#include <asm/smp.h>
  42#include <asm/mmu.h>
  43#include <asm/paca.h>
  44#include <asm/powernv.h>
  45#include <asm/iommu.h>
  46#include <asm/btext.h>
  47#include <asm/sections.h>
  48#include <asm/setup.h>
  49#include <asm/pci-bridge.h>
  50#include <asm/kexec.h>
  51#include <asm/opal.h>
  52#include <asm/fadump.h>
  53#include <asm/epapr_hcalls.h>
  54#include <asm/firmware.h>
  55#include <asm/dt_cpu_ftrs.h>
  56#include <asm/drmem.h>
  57#include <asm/ultravisor.h>
  58#include <asm/prom.h>
  59#include <asm/plpks.h>
  60
  61#include <mm/mmu_decl.h>
  62
  63#ifdef DEBUG
  64#define DBG(fmt...) printk(KERN_ERR fmt)
  65#else
  66#define DBG(fmt...)
  67#endif
  68
  69int *chip_id_lookup_table;
  70
  71#ifdef CONFIG_PPC64
  72int __initdata iommu_is_off;
  73int __initdata iommu_force_on;
  74unsigned long tce_alloc_start, tce_alloc_end;
  75u64 ppc64_rma_size;
  76unsigned int boot_cpu_node_count __ro_after_init;
  77#endif
  78static phys_addr_t first_memblock_size;
  79static int __initdata boot_cpu_count;
  80
  81static int __init early_parse_mem(char *p)
  82{
  83	if (!p)
  84		return 1;
  85
  86	memory_limit = PAGE_ALIGN(memparse(p, &p));
  87	DBG("memory limit = 0x%llx\n", memory_limit);
  88
  89	return 0;
  90}
  91early_param("mem", early_parse_mem);
  92
  93/*
  94 * overlaps_initrd - check for overlap with page aligned extension of
  95 * initrd.
  96 */
  97static inline int overlaps_initrd(unsigned long start, unsigned long size)
  98{
  99#ifdef CONFIG_BLK_DEV_INITRD
 100	if (!initrd_start)
 101		return 0;
 102
 103	return	(start + size) > ALIGN_DOWN(initrd_start, PAGE_SIZE) &&
 104			start <= ALIGN(initrd_end, PAGE_SIZE);
 105#else
 106	return 0;
 107#endif
 108}
 109
 110/**
 111 * move_device_tree - move tree to an unused area, if needed.
 112 *
 113 * The device tree may be allocated beyond our memory limit, or inside the
 114 * crash kernel region for kdump, or within the page aligned range of initrd.
 115 * If so, move it out of the way.
 116 */
 117static void __init move_device_tree(void)
 118{
 119	unsigned long start, size;
 120	void *p;
 121
 122	DBG("-> move_device_tree\n");
 123
 124	start = __pa(initial_boot_params);
 125	size = fdt_totalsize(initial_boot_params);
 126
 127	if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) ||
 128	    !memblock_is_memory(start + size - 1) ||
 129	    overlaps_crashkernel(start, size) || overlaps_initrd(start, size)) {
 130		p = memblock_alloc_raw(size, PAGE_SIZE);
 131		if (!p)
 132			panic("Failed to allocate %lu bytes to move device tree\n",
 133			      size);
 134		memcpy(p, initial_boot_params, size);
 135		initial_boot_params = p;
 136		DBG("Moved device tree to 0x%px\n", p);
 137	}
 138
 139	DBG("<- move_device_tree\n");
 140}
 141
 142/*
 143 * ibm,pa/pi-features is a per-cpu property that contains a string of
 144 * attribute descriptors, each of which has a 2 byte header plus up
 145 * to 254 bytes worth of processor attribute bits.  First header
 146 * byte specifies the number of bytes following the header.
 147 * Second header byte is an "attribute-specifier" type, of which
 148 * zero is the only currently-defined value.
 149 * Implementation:  Pass in the byte and bit offset for the feature
 150 * that we are interested in.  The function will return -1 if the
 151 * pa-features property is missing, or a 1/0 to indicate if the feature
 152 * is supported/not supported.  Note that the bit numbers are
 153 * big-endian to match the definition in PAPR.
 154 * Note: the 'clear' flag clears the feature if the bit is set in the
 155 * ibm,pa/pi-features property, it does not set the feature if the
 156 * bit is clear.
 157 */
 158struct ibm_feature {
 159	unsigned long	cpu_features;	/* CPU_FTR_xxx bit */
 160	unsigned long	mmu_features;	/* MMU_FTR_xxx bit */
 161	unsigned int	cpu_user_ftrs;	/* PPC_FEATURE_xxx bit */
 162	unsigned int	cpu_user_ftrs2;	/* PPC_FEATURE2_xxx bit */
 163	unsigned char	pabyte;		/* byte number in ibm,pa/pi-features */
 164	unsigned char	pabit;		/* bit number (big-endian) */
 165	unsigned char	clear;		/* if 1, pa bit set => clear feature */
 166};
 167
 168static struct ibm_feature ibm_pa_features[] __initdata = {
 169	{ .pabyte = 0,  .pabit = 0, .cpu_user_ftrs = PPC_FEATURE_HAS_MMU },
 170	{ .pabyte = 0,  .pabit = 1, .cpu_user_ftrs = PPC_FEATURE_HAS_FPU },
 171	{ .pabyte = 0,  .pabit = 3, .cpu_features  = CPU_FTR_CTRL },
 172	{ .pabyte = 0,  .pabit = 6, .cpu_features  = CPU_FTR_NOEXECUTE },
 173	{ .pabyte = 1,  .pabit = 2, .mmu_features  = MMU_FTR_CI_LARGE_PAGE },
 174#ifdef CONFIG_PPC_RADIX_MMU
 175	{ .pabyte = 40, .pabit = 0, .mmu_features  = MMU_FTR_TYPE_RADIX | MMU_FTR_GTSE },
 176#endif
 177	{ .pabyte = 5,  .pabit = 0, .cpu_features  = CPU_FTR_REAL_LE,
 178				    .cpu_user_ftrs = PPC_FEATURE_TRUE_LE },
 179	/*
 180	 * If the kernel doesn't support TM (ie CONFIG_PPC_TRANSACTIONAL_MEM=n),
 181	 * we don't want to turn on TM here, so we use the *_COMP versions
 182	 * which are 0 if the kernel doesn't support TM.
 183	 */
 184	{ .pabyte = 22, .pabit = 0, .cpu_features = CPU_FTR_TM_COMP,
 185	  .cpu_user_ftrs2 = PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_HTM_NOSC_COMP },
 186
 187	{ .pabyte = 64, .pabit = 0, .cpu_features = CPU_FTR_DAWR1 },
 188	{ .pabyte = 68, .pabit = 5, .cpu_features = CPU_FTR_DEXCR_NPHIE },
 189};
 190
 191/*
 192 * ibm,pi-features property provides the support of processor specific
 193 * options not described in ibm,pa-features. Right now we use byte 0, bit 3,
 194 * which indicates the occurrence of a DSI interrupt when a paste operation
 195 * is performed on a suspended NX window.
 196 */
 197static struct ibm_feature ibm_pi_features[] __initdata = {
 198	{ .pabyte = 0, .pabit = 3, .mmu_features  = MMU_FTR_NX_DSI },
 199	{ .pabyte = 0, .pabit = 4, .cpu_features  = CPU_FTR_DBELL, .clear = 1 },
 200};
 201
 202static void __init scan_features(unsigned long node, const unsigned char *ftrs,
 203				 unsigned long tablelen,
 204				 struct ibm_feature *fp,
 205				 unsigned long ft_size)
 206{
 207	unsigned long i, len, bit;
 208
 209	/* find descriptor with type == 0 */
 210	for (;;) {
 211		if (tablelen < 3)
 212			return;
 213		len = 2 + ftrs[0];
 214		if (tablelen < len)
 215			return;		/* descriptor 0 not found */
 216		if (ftrs[1] == 0)
 217			break;
 218		tablelen -= len;
 219		ftrs += len;
 220	}
 221
 222	/* loop over bits we know about */
 223	for (i = 0; i < ft_size; ++i, ++fp) {
 224		if (fp->pabyte >= ftrs[0])
 225			continue;
 226		bit = (ftrs[2 + fp->pabyte] >> (7 - fp->pabit)) & 1;
 227		if (bit && !fp->clear) {
 228			cur_cpu_spec->cpu_features |= fp->cpu_features;
 229			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs;
 230			cur_cpu_spec->cpu_user_features2 |= fp->cpu_user_ftrs2;
 231			cur_cpu_spec->mmu_features |= fp->mmu_features;
 232		} else if (bit == fp->clear) {
 233			cur_cpu_spec->cpu_features &= ~fp->cpu_features;
 234			cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs;
 235			cur_cpu_spec->cpu_user_features2 &= ~fp->cpu_user_ftrs2;
 236			cur_cpu_spec->mmu_features &= ~fp->mmu_features;
 237		}
 238	}
 239}
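     /*
      * Net effect of the 'clear' flag: an entry with clear == 0 sets its
      * features when the bit is 1 and clears them when it is 0; an entry
      * with clear == 1 clears its features when the bit is 1 and leaves
      * them untouched when the bit is 0.
      */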
 240
 241static void __init check_cpu_features(unsigned long node, char *name,
 242				      struct ibm_feature *fp,
 243				      unsigned long size)
 244{
 245	const unsigned char *pa_ftrs;
 246	int tablelen;
 247
 248	pa_ftrs = of_get_flat_dt_prop(node, name, &tablelen);
 249	if (pa_ftrs == NULL)
 250		return;
 251
 252	scan_features(node, pa_ftrs, tablelen, fp, size);
 253}
 254
 255#ifdef CONFIG_PPC_64S_HASH_MMU
 256static void __init init_mmu_slb_size(unsigned long node)
 257{
 258	const __be32 *slb_size_ptr;
 259
 260	slb_size_ptr = of_get_flat_dt_prop(node, "slb-size", NULL) ? :
 261			of_get_flat_dt_prop(node, "ibm,slb-size", NULL);
 262
 263	if (slb_size_ptr)
 264		mmu_slb_size = be32_to_cpup(slb_size_ptr);
 265}
 266#else
 267#define init_mmu_slb_size(node) do { } while(0)
 268#endif
 269
 270static struct feature_property {
 271	const char *name;
 272	u32 min_value;
 273	unsigned long cpu_feature;
 274	unsigned long cpu_user_ftr;
 275} feature_properties[] __initdata = {
 276#ifdef CONFIG_ALTIVEC
 277	{"altivec", 0, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
 278	{"ibm,vmx", 1, CPU_FTR_ALTIVEC, PPC_FEATURE_HAS_ALTIVEC},
 279#endif /* CONFIG_ALTIVEC */
 280#ifdef CONFIG_VSX
 281	/* Yes, this _really_ is ibm,vmx == 2 to enable VSX */
 282	{"ibm,vmx", 2, CPU_FTR_VSX, PPC_FEATURE_HAS_VSX},
 283#endif /* CONFIG_VSX */
 284#ifdef CONFIG_PPC64
 285	{"ibm,dfp", 1, 0, PPC_FEATURE_HAS_DFP},
 286	{"ibm,purr", 1, CPU_FTR_PURR, 0},
 287	{"ibm,spurr", 1, CPU_FTR_SPURR, 0},
 288#endif /* CONFIG_PPC64 */
 289};
 290
 291#if defined(CONFIG_44x) && defined(CONFIG_PPC_FPU)
 292static __init void identical_pvr_fixup(unsigned long node)
 293{
 294	unsigned int pvr;
 295	const char *model = of_get_flat_dt_prop(node, "model", NULL);
 296
 297	/*
 298	 * Since 440GR(x)/440EP(x) processors have the same pvr,
 299	 * we check the node path and set bit 28 in the cur_cpu_spec
 300	 * pvr for EP(x) processor version. This bit is always 0 in
 301	 * the "real" pvr. Then we call identify_cpu again with
 302	 * the new logical pvr to enable FPU support.
 303	 */
 304	if (model && strstr(model, "440EP")) {
 305		pvr = cur_cpu_spec->pvr_value | 0x8;
 306		identify_cpu(0, pvr);
 307		DBG("Using logical pvr %x for %s\n", pvr, model);
 308	}
 309}
 310#else
 311#define identical_pvr_fixup(node) do { } while(0)
 312#endif
 313
 314static void __init check_cpu_feature_properties(unsigned long node)
 315{
 316	int i;
 317	struct feature_property *fp = feature_properties;
 318	const __be32 *prop;
 319
 320	for (i = 0; i < (int)ARRAY_SIZE(feature_properties); ++i, ++fp) {
 321		prop = of_get_flat_dt_prop(node, fp->name, NULL);
 322		if (prop && be32_to_cpup(prop) >= fp->min_value) {
 323			cur_cpu_spec->cpu_features |= fp->cpu_feature;
 324			cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftr;
 325		}
 326	}
 327}
 328
 329static int __init early_init_dt_scan_cpus(unsigned long node,
 330					  const char *uname, int depth,
 331					  void *data)
 332{
 333	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 334	const __be32 *cpu_version = NULL;
 335	const __be32 *prop;
 336	const __be32 *intserv;
 337	int i, nthreads;
 338	int len;
 339	int found = -1;
 340	int found_thread = 0;
 341
 342	/* We are scanning "cpu" nodes only */
 343	if (type == NULL || strcmp(type, "cpu") != 0)
 344		return 0;
 345
 346	if (IS_ENABLED(CONFIG_PPC64))
 347		boot_cpu_node_count++;
 348
 349	/* Get physical cpuid */
 350	intserv = of_get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", &len);
 351	if (!intserv)
 352		intserv = of_get_flat_dt_prop(node, "reg", &len);
 353
 354	nthreads = len / sizeof(int);
 355
 356	/*
 357	 * Now see if any of these threads match our boot cpu.
 358	 * NOTE: This must match the parsing done in smp_setup_cpu_maps.
 359	 */
 360	for (i = 0; i < nthreads; i++) {
 361		if (be32_to_cpu(intserv[i]) ==
 362			fdt_boot_cpuid_phys(initial_boot_params)) {
 363			found = boot_cpu_count;
 364			found_thread = i;
 365		}
 366#ifdef CONFIG_SMP
 367		/* logical cpu id is always 0 on UP kernels */
 368		boot_cpu_count++;
 369#endif
 370	}
 371
 372	/* Not the boot CPU */
 373	if (found < 0)
 374		return 0;
 375
 376	boot_cpuid = found;
 377
 378	if (IS_ENABLED(CONFIG_PPC64))
 379		boot_cpu_hwid = be32_to_cpu(intserv[found_thread]);
 380
 381	if (nr_cpu_ids % nthreads != 0) {
 382		set_nr_cpu_ids(ALIGN(nr_cpu_ids, nthreads));
 383		pr_warn("nr_cpu_ids was not a multiple of threads_per_core, adjusted to %d\n",
 384			nr_cpu_ids);
 385	}
 386
 387	if (boot_cpuid >= nr_cpu_ids) {
 388		// Remember boot core for smp_setup_cpu_maps()
 389		boot_core_hwid = be32_to_cpu(intserv[0]);
 390
 391		pr_warn("Boot CPU %d (core hwid %d) >= nr_cpu_ids, adjusted boot CPU to %d\n",
 392			boot_cpuid, boot_core_hwid, found_thread);
 393
 394		// Adjust boot CPU to appear on logical core 0
 395		boot_cpuid = found_thread;
 396	}
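	/*
	 * Example: if the boot CPU is logical 10 on thread 2 of its core and
	 * nr_cpu_ids is 8, boot_core_hwid remembers the core's hwid and
	 * boot_cpuid is remapped to 2, i.e. thread 2 of logical core 0.
	 */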
 397
 398	DBG("boot cpu: logical %d physical %d\n", boot_cpuid,
 399	    be32_to_cpu(intserv[found_thread]));
 400
 401	/*
 402	 * PAPR defines "logical" PVR values for cpus that
 403	 * meet various levels of the architecture:
 404	 * 0x0f000001	Architecture version 2.04
 405	 * 0x0f000002	Architecture version 2.05
 406	 * If the cpu-version property in the cpu node contains
 407	 * such a value, we call identify_cpu again with the
 408	 * logical PVR value in order to use the cpu feature
 409	 * bits appropriate for the architecture level.
 410	 *
 411	 * A POWER6 partition in "POWER6 architected" mode
 412	 * uses the 0x0f000002 PVR value; in POWER5+ mode
 413	 * it uses 0x0f000001.
 414	 *
 415	 * If we're using device tree CPU feature discovery then we don't
 416	 * support the cpu-version property, and it's the responsibility of the
 417	 * firmware/hypervisor to provide the correct feature set for the
 418	 * architecture level via the ibm,powerpc-cpu-features binding.
 419	 */
 420	if (!dt_cpu_ftrs_in_use()) {
 421		prop = of_get_flat_dt_prop(node, "cpu-version", NULL);
 422		if (prop && (be32_to_cpup(prop) & 0xff000000) == 0x0f000000) {
 423			identify_cpu(0, be32_to_cpup(prop));
 424			cpu_version = prop;
 425		}
 426
 427		check_cpu_feature_properties(node);
 428		check_cpu_features(node, "ibm,pa-features", ibm_pa_features,
 429				   ARRAY_SIZE(ibm_pa_features));
 430		check_cpu_features(node, "ibm,pi-features", ibm_pi_features,
 431				   ARRAY_SIZE(ibm_pi_features));
 432	}
 433
 434	identical_pvr_fixup(node);
 435
 436	// We can now add the CPU name & PVR to the hardware description
 437	seq_buf_printf(&ppc_hw_desc, "%s 0x%04lx ", cur_cpu_spec->cpu_name, mfspr(SPRN_PVR));
 438	if (cpu_version)
 439		seq_buf_printf(&ppc_hw_desc, "0x%04x ", be32_to_cpup(cpu_version));
 440
 441	init_mmu_slb_size(node);
 442
 443#ifdef CONFIG_PPC64
 444	if (nthreads == 1)
 445		cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT;
 446	else if (!dt_cpu_ftrs_in_use())
 447		cur_cpu_spec->cpu_features |= CPU_FTR_SMT;
 448#endif
 449
 450	return 0;
 451}
 452
 453static int __init early_init_dt_scan_chosen_ppc(unsigned long node,
 454						const char *uname,
 455						int depth, void *data)
 456{
 457	const unsigned long *lprop; /* All these set by kernel, so no need to convert endian */
 458
 459	/* Use common scan routine to determine if this is the chosen node */
 460	if (early_init_dt_scan_chosen(data) < 0)
 461		return 0;
 462
 463#ifdef CONFIG_PPC64
 464	/* check if iommu is forced on or off */
 465	if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
 466		iommu_is_off = 1;
 467	if (of_get_flat_dt_prop(node, "linux,iommu-force-on", NULL) != NULL)
 468		iommu_force_on = 1;
 469#endif
 470
 471	/* mem=x on the command line is the preferred mechanism */
 472	lprop = of_get_flat_dt_prop(node, "linux,memory-limit", NULL);
 473	if (lprop)
 474		memory_limit = *lprop;
 475
 476#ifdef CONFIG_PPC64
 477	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-start", NULL);
 478	if (lprop)
 479		tce_alloc_start = *lprop;
 480	lprop = of_get_flat_dt_prop(node, "linux,tce-alloc-end", NULL);
 481	if (lprop)
 482		tce_alloc_end = *lprop;
 483#endif
 484
 485#ifdef CONFIG_CRASH_RESERVE
 486	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
 487	if (lprop)
 488		crashk_res.start = *lprop;
 489
 490	lprop = of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
 491	if (lprop)
 492		crashk_res.end = crashk_res.start + *lprop - 1;
 493#endif
 494
 495	/* break now */
 496	return 1;
 497}
 498
 499/*
 500 * Compare the range against max mem limit and update
 501 * size if it crosses the limit.
 502 */
 503
 504#ifdef CONFIG_SPARSEMEM
 505static bool __init validate_mem_limit(u64 base, u64 *size)
 506{
 507	u64 max_mem = 1UL << (MAX_PHYSMEM_BITS);
 508
 509	if (base >= max_mem)
 510		return false;
 511	if ((base + *size) > max_mem)
 512		*size = max_mem - base;
 513	return true;
 514}
 515#else
 516static bool __init validate_mem_limit(u64 base, u64 *size)
 517{
 518	return true;
 519}
 520#endif
 521
 522#ifdef CONFIG_PPC_PSERIES
 523/*
 524 * Interpret the ibm dynamic reconfiguration memory LMBs.
 525 * This contains a list of memory blocks along with NUMA affinity
 526 * information.
 527 */
 528static int  __init early_init_drmem_lmb(struct drmem_lmb *lmb,
 529					const __be32 **usm,
 530					void *data)
 531{
 532	u64 base, size;
 533	int is_kexec_kdump = 0, rngs;
 534
 535	base = lmb->base_addr;
 536	size = drmem_lmb_size();
 537	rngs = 1;
 538
 539	/*
 540	 * Skip this block if the reserved bit is set in flags
 541	 * or if the block is not assigned to this partition.
 542	 */
 543	if ((lmb->flags & DRCONF_MEM_RESERVED) ||
 544	    !(lmb->flags & DRCONF_MEM_ASSIGNED))
 545		return 0;
 546
 547	if (*usm)
 548		is_kexec_kdump = 1;
 549
 550	if (is_kexec_kdump) {
 551		/*
 552		 * For each memblock in ibm,dynamic-memory, a
 553		 * corresponding entry in linux,drconf-usable-memory
 554		 * property contains a counter 'p' followed by 'p'
 555 * (base, size) duples. Now read the counter from the
 556		 * linux,drconf-usable-memory property
 557		 */
 558		rngs = dt_mem_next_cell(dt_root_size_cells, usm);
 559		if (!rngs) /* there are no (base, size) duples */
 560			return 0;
 561	}
 562
 563	do {
 564		if (is_kexec_kdump) {
 565			base = dt_mem_next_cell(dt_root_addr_cells, usm);
 566			size = dt_mem_next_cell(dt_root_size_cells, usm);
 567		}
 568
 569		if (iommu_is_off) {
 570			if (base >= 0x80000000ul)
 571				continue;
 572			if ((base + size) > 0x80000000ul)
 573				size = 0x80000000ul - base;
 574		}
 575
 576		if (!validate_mem_limit(base, &size))
 577			continue;
 578
 579		DBG("Adding: %llx -> %llx\n", base, size);
 580		memblock_add(base, size);
 581
 582		if (lmb->flags & DRCONF_MEM_HOTREMOVABLE)
 583			memblock_mark_hotplug(base, size);
 584	} while (--rngs);
 585
 586	return 0;
 587}
 588#endif /* CONFIG_PPC_PSERIES */
 589
 590static int __init early_init_dt_scan_memory_ppc(void)
 591{
 592#ifdef CONFIG_PPC_PSERIES
 593	const void *fdt = initial_boot_params;
 594	int node = fdt_path_offset(fdt, "/ibm,dynamic-reconfiguration-memory");
 595
 596	if (node > 0)
 597		walk_drmem_lmbs_early(node, NULL, early_init_drmem_lmb);
 598
 599#endif
 600
 601	return early_init_dt_scan_memory();
 602}
 603
 604/*
 605 * For a relocatable kernel, we need to get the memstart_addr first,
 606 * then use it to calculate the virtual kernel start address. This has
 607 * to happen at a very early stage (before machine_init). In this case,
 608 * we just want to get the memstart_addr and would not like to mess with
 609 * the memblock at this stage, so we introduce a variable to skip the
 610 * memblock_add() in that case.
 611 */
 612#ifdef CONFIG_RELOCATABLE
 613static int add_mem_to_memblock = 1;
 614#else
 615#define add_mem_to_memblock 1
 616#endif
 617
 618void __init early_init_dt_add_memory_arch(u64 base, u64 size)
 619{
 620#ifdef CONFIG_PPC64
 621	if (iommu_is_off) {
 622		if (base >= 0x80000000ul)
 623			return;
 624		if ((base + size) > 0x80000000ul)
 625			size = 0x80000000ul - base;
 626	}
 627#endif
 628	/* Keep track of the beginning of memory -and- the size of
 629	 * the very first block in the device-tree as it represents
 630	 * the RMA on ppc64 server
 631	 */
 632	if (base < memstart_addr) {
 633		memstart_addr = base;
 634		first_memblock_size = size;
 635	}
 636
 637	/* Add the chunk to the MEMBLOCK list */
 638	if (add_mem_to_memblock) {
 639		if (validate_mem_limit(base, &size))
 640			memblock_add(base, size);
 641	}
 642}
 643
 644static void __init early_reserve_mem_dt(void)
 645{
 646	unsigned long i, dt_root;
 647	int len;
 648	const __be32 *prop;
 649
 650	early_init_fdt_reserve_self();
 651	early_init_fdt_scan_reserved_mem();
 652
 653	dt_root = of_get_flat_dt_root();
 654
 655	prop = of_get_flat_dt_prop(dt_root, "reserved-ranges", &len);
 656
 657	if (!prop)
 658		return;
 659
 660	DBG("Found new-style reserved-ranges\n");
 661
 662	/* Each reserved range is an (address,size) pair, 2 cells each,
 663	 * totalling 4 cells per range. */
 664	for (i = 0; i < len / (sizeof(*prop) * 4); i++) {
 665		u64 base, size;
 666
 667		base = of_read_number(prop + (i * 4) + 0, 2);
 668		size = of_read_number(prop + (i * 4) + 2, 2);
 669
 670		if (size) {
 671			DBG("reserving: %llx -> %llx\n", base, size);
 672			memblock_reserve(base, size);
 673		}
 674	}
 675}
 676
 677static void __init early_reserve_mem(void)
 678{
 679	__be64 *reserve_map;
 680
 681	reserve_map = (__be64 *)(((unsigned long)initial_boot_params) +
 682			fdt_off_mem_rsvmap(initial_boot_params));
 683
 684	/* Look for the new "reserved-ranges" property in the DT */
 685	early_reserve_mem_dt();
 686
 687#ifdef CONFIG_BLK_DEV_INITRD
 688	/* Then reserve the initrd, if any */
 689	if (initrd_start && (initrd_end > initrd_start)) {
 690		memblock_reserve(ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE),
 691			ALIGN(initrd_end, PAGE_SIZE) -
 692			ALIGN_DOWN(initrd_start, PAGE_SIZE));
 693	}
 694#endif /* CONFIG_BLK_DEV_INITRD */
 695
 696	if (!IS_ENABLED(CONFIG_PPC32))
 697		return;
 698
 699	/* 
 700	 * Handle the case where we might be booting from an old kexec
 701	 * image that set up the mem_rsvmap as pairs of 32-bit values
 702	 */
 703	if (be64_to_cpup(reserve_map) > 0xffffffffull) {
 704		u32 base_32, size_32;
 705		__be32 *reserve_map_32 = (__be32 *)reserve_map;
 706
 707		DBG("Found old 32-bit reserve map\n");
 708
 709		while (1) {
 710			base_32 = be32_to_cpup(reserve_map_32++);
 711			size_32 = be32_to_cpup(reserve_map_32++);
 712			if (size_32 == 0)
 713				break;
 714			DBG("reserving: %x -> %x\n", base_32, size_32);
 715			memblock_reserve(base_32, size_32);
 716		}
 717		return;
 718	}
 719}
 720
 721#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 722static bool tm_disabled __initdata;
 723
 724static int __init parse_ppc_tm(char *str)
 725{
 726	bool res;
 727
 728	if (kstrtobool(str, &res))
 729		return -EINVAL;
 730
 731	tm_disabled = !res;
 732
 733	return 0;
 734}
 735early_param("ppc_tm", parse_ppc_tm);
 736
 737static void __init tm_init(void)
 738{
 739	if (tm_disabled) {
 740		pr_info("Disabling hardware transactional memory (HTM)\n");
 741		cur_cpu_spec->cpu_user_features2 &=
 742			~(PPC_FEATURE2_HTM_NOSC | PPC_FEATURE2_HTM);
 743		cur_cpu_spec->cpu_features &= ~CPU_FTR_TM;
 744		return;
 745	}
 746
 747	pnv_tm_init();
 748}
 749#else
 750static void tm_init(void) { }
 751#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 752
 753static int __init
 754early_init_dt_scan_model(unsigned long node, const char *uname,
 755			 int depth, void *data)
 756{
 757	const char *prop;
 758
 759	if (depth != 0)
 760		return 0;
 761
 762	prop = of_get_flat_dt_prop(node, "model", NULL);
 763	if (prop)
 764		seq_buf_printf(&ppc_hw_desc, "%s ", prop);
 765
 766	/* break now */
 767	return 1;
 768}
 769
 770#ifdef CONFIG_PPC64
 771static void __init save_fscr_to_task(void)
 772{
 773	/*
 774	 * Ensure the init_task (pid 0, aka swapper) uses the value of FSCR we
 775	 * have configured via the device tree features or via __init_FSCR().
 776	 * That value will then be propagated to pid 1 (init) and all future
 777	 * processes.
 778	 */
 779	if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
 780		init_task.thread.fscr = mfspr(SPRN_FSCR);
 781}
 782#else
 783static inline void save_fscr_to_task(void) {}
 784#endif
 785
 786
 787void __init early_init_devtree(void *params)
 788{
 789	phys_addr_t int_vector_size;
 790
 791	DBG(" -> early_init_devtree(%px)\n", params);
 792
 793	/* Too early to BUG_ON(), do it by hand */
 794	if (!early_init_dt_verify(params, __pa(params)))
 795		panic("BUG: Failed verifying flat device tree, bad version?");
 796
 797	of_scan_flat_dt(early_init_dt_scan_model, NULL);
 798
 799#ifdef CONFIG_PPC_RTAS
 800	/* Some machines might need RTAS info for debugging, grab it now. */
 801	of_scan_flat_dt(early_init_dt_scan_rtas, NULL);
 802#endif
 803
 804#ifdef CONFIG_PPC_POWERNV
 805	/* Some machines might need OPAL info for debugging, grab it now. */
 806	of_scan_flat_dt(early_init_dt_scan_opal, NULL);
 807
 808	/* Scan tree for ultravisor feature */
 809	of_scan_flat_dt(early_init_dt_scan_ultravisor, NULL);
 810#endif
 811
 812#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
 813	/* scan tree to see if dump is active during last boot */
 814	of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
 815#endif
 816
  817	/* Retrieve various information from the /chosen node of the
 818	 * device-tree, including the platform type, initrd location and
 819	 * size, TCE reserve, and more ...
 820	 */
 821	of_scan_flat_dt(early_init_dt_scan_chosen_ppc, boot_command_line);
 822
 823	/* Append additional parameters passed for fadump capture kernel */
 824	fadump_append_bootargs();
 825
 826	/* Scan memory nodes and rebuild MEMBLOCKs */
 827	early_init_dt_scan_root();
 828	early_init_dt_scan_memory_ppc();
 829
 830	/*
 831	 * As generic code authors expect to be able to use static keys
 832	 * in early_param() handlers, we initialize the static keys just
 833	 * before parsing early params (it's fine to call jump_label_init()
 834	 * more than once).
 835	 */
 836	jump_label_init();
 837	parse_early_param();
 838
 839	/* make sure we've parsed cmdline for mem= before this */
 840	if (memory_limit)
 841		first_memblock_size = min_t(u64, first_memblock_size, memory_limit);
 842	setup_initial_memory_limit(memstart_addr, first_memblock_size);
 843	/* Reserve MEMBLOCK regions used by kernel, initrd, dt, etc... */
 844	memblock_reserve(PHYSICAL_START, __pa(_end) - PHYSICAL_START);
 845#ifdef CONFIG_PPC64
 846	/* If relocatable, reserve at least 32k for interrupt vectors etc. */
 847	int_vector_size = __end_interrupts - _stext;
 848	int_vector_size = max_t(phys_addr_t, SZ_32K, int_vector_size);
 849#else
 850	/* If relocatable, reserve first 32k for interrupt vectors etc. */
 851	int_vector_size = SZ_32K;
 852#endif
 853	if (PHYSICAL_START > MEMORY_START)
 854		memblock_reserve(MEMORY_START, int_vector_size);
 855	reserve_kdump_trampoline();
 856#if defined(CONFIG_FA_DUMP) || defined(CONFIG_PRESERVE_FA_DUMP)
 857	/*
 858	 * If we fail to reserve memory for firmware-assisted dump then
 859	 * fallback to kexec based kdump.
 860	 */
 861	if (fadump_reserve_mem() == 0)
 862#endif
 863		reserve_crashkernel();
 864	early_reserve_mem();
 865
 866	if (memory_limit > memblock_phys_mem_size())
 867		memory_limit = 0;
 868
  869	/* Align down to 16 MB, which is the large page size under hash page translation */
 870	memory_limit = ALIGN_DOWN(memory_limit ?: memblock_phys_mem_size(), SZ_16M);
 871	memblock_enforce_memory_limit(memory_limit);
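	/*
	 * Example of the rounding above (hypothetical value): "mem=1000M"
	 * yields memory_limit = 0x3e800000, which ALIGN_DOWN() to 16MB
	 * truncates to 0x3e000000 (992MB), keeping the limit on a hash
	 * MMU large-page boundary.
	 */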
 872
 873#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_4K_PAGES)
 874	if (!early_radix_enabled())
 875		memblock_cap_memory_range(0, 1UL << (H_MAX_PHYSMEM_BITS));
 876#endif
 877
 878	memblock_allow_resize();
 879	memblock_dump_all();
 880
 881	DBG("Phys. mem: %llx\n", (unsigned long long)memblock_phys_mem_size());
 882
 883	/* We may need to relocate the flat tree, do it now.
 884	 * FIXME .. and the initrd too? */
 885	move_device_tree();
 886
 887	DBG("Scanning CPUs ...\n");
 888
 889	dt_cpu_ftrs_scan();
 890
  891	/* Retrieve CPU-related information from the flat tree
 892	 * (altivec support, boot CPU ID, ...)
 893	 */
 894	of_scan_flat_dt(early_init_dt_scan_cpus, NULL);
 895	if (boot_cpuid < 0) {
  896		printk("Failed to identify boot CPU!\n");
 897		BUG();
 898	}
 899
 900	save_fscr_to_task();
 901
 902#if defined(CONFIG_SMP) && defined(CONFIG_PPC64)
 903	/* We'll later wait for secondaries to check in; there are
 904	 * NCPUS-1 non-boot CPUs  :-)
 905	 */
 906	spinning_secondaries = boot_cpu_count - 1;
 907#endif
 908
 909	mmu_early_init_devtree();
 910
 911	/* Setup param area for passing additional parameters to fadump capture kernel. */
 912	fadump_setup_param_area();
 913
 914#ifdef CONFIG_PPC_POWERNV
 915	/* Scan and build the list of machine check recoverable ranges */
 916	of_scan_flat_dt(early_init_dt_scan_recoverable_ranges, NULL);
 917#endif
 918	epapr_paravirt_early_init();
 919
 920	/* Now try to figure out if we are running on LPAR and so on */
 921	pseries_probe_fw_features();
 922
 923	/*
 924	 * Initialize pkey features and default AMR/IAMR values
 925	 */
 926	pkey_early_init_devtree();
 927
 928#ifdef CONFIG_PPC_PS3
 929	/* Identify PS3 firmware */
 930	if (of_flat_dt_is_compatible(of_get_flat_dt_root(), "sony,ps3"))
 931		powerpc_firmware_features |= FW_FEATURE_PS3_POSSIBLE;
 932#endif
 933
 934	/* If kexec left a PLPKS password in the DT, get it and clear it */
 935	plpks_early_init_devtree();
 936
 937	tm_init();
 938
 939	DBG(" <- early_init_devtree()\n");
 940}
 941
 942#ifdef CONFIG_RELOCATABLE
 943/*
  944 * This function runs before early_init_devtree, so we have to
  945 * initialize initial_boot_params here.
 946 */
 947void __init early_get_first_memblock_info(void *params, phys_addr_t *size)
 948{
 949	/* Setup flat device-tree pointer */
 950	initial_boot_params = params;
 951
 952	/*
 953	 * Scan the memory nodes and set add_mem_to_memblock to 0 to avoid
  954	 * messing up the memblock.
 955	 */
 956	add_mem_to_memblock = 0;
 957	early_init_dt_scan_root();
 958	early_init_dt_scan_memory_ppc();
 959	add_mem_to_memblock = 1;
 960
 961	if (size)
 962		*size = first_memblock_size;
 963}
 964#endif
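
/*
 * Sketch of a caller, assuming a relocatable kernel's early platform
 * setup (the function below is illustrative, not the real call site):
 * pass in the flat device tree, get back the size of the first memory
 * block to bound the early linear mapping.
 */
#if 0	/* illustrative sketch, not built */
static void __init example_early_setup(void *fdt)
{
	phys_addr_t first_size;

	early_get_first_memblock_info(fdt, &first_size);
	/* first_size now limits how much memory early code may map */
}
#endif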
 965
 966/*******
 967 *
  968 * New implementation of the OF "find" APIs; they return a refcounted
  969 * object, so call of_node_put() when done.  The device tree and list
 970 * are protected by a rw_lock.
 971 *
 972 * Note that property management will need some locking as well,
 973 * this isn't dealt with yet.
 974 *
 975 *******/
 976
 977/**
 978 * of_get_ibm_chip_id - Returns the IBM "chip-id" of a device
 979 * @np: device node of the device
 980 *
 981 * This looks for a property "ibm,chip-id" in the node or any
 982 * of its parents and returns its content, or -1 if it cannot
 983 * be found.
 984 */
 985int of_get_ibm_chip_id(struct device_node *np)
 986{
 987	of_node_get(np);
 988	while (np) {
 989		u32 chip_id;
 990
 991		/*
 992		 * Skiboot may produce memory nodes that contain more than one
 993		 * cell in chip-id, we only read the first one here.
 994		 */
 995		if (!of_property_read_u32(np, "ibm,chip-id", &chip_id)) {
 996			of_node_put(np);
 997			return chip_id;
 998		}
 999
1000		np = of_get_next_parent(np);
1001	}
1002	return -1;
1003}
1004EXPORT_SYMBOL(of_get_ibm_chip_id);
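
/*
 * Usage sketch (the device path below is hypothetical):
 * of_get_ibm_chip_id() walks up the parent chain and drops the
 * reference it takes internally, so the caller only manages its own.
 */
#if 0	/* illustrative sketch, not built */
static int example_node_chip(void)
{
	struct device_node *np;
	int chip;

	np = of_find_node_by_path("/example@0");
	if (!np)
		return -1;

	chip = of_get_ibm_chip_id(np);	/* -1 if no ibm,chip-id is found */
	of_node_put(np);
	return chip;
}
#endif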
1005
1006/**
 1007 * cpu_to_chip_id - Return the cpu's chip-id
1008 * @cpu: The logical cpu number.
1009 *
1010 * Return the value of the ibm,chip-id property corresponding to the given
 1011 * logical cpu number. If the chip-id cannot be found, returns -1.
1012 */
1013int cpu_to_chip_id(int cpu)
1014{
1015	struct device_node *np;
1016	int ret = -1, idx;
1017
1018	idx = cpu / threads_per_core;
1019	if (chip_id_lookup_table && chip_id_lookup_table[idx] != -1)
1020		return chip_id_lookup_table[idx];
1021
1022	np = of_get_cpu_node(cpu, NULL);
1023	if (np) {
1024		ret = of_get_ibm_chip_id(np);
1025		of_node_put(np);
1026
1027		if (chip_id_lookup_table)
1028			chip_id_lookup_table[idx] = ret;
1029	}
1030
1031	return ret;
1032}
1033EXPORT_SYMBOL(cpu_to_chip_id);
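
/*
 * Example of the caching above (hypothetical topology): with
 * threads_per_core = 8, CPUs 0-7 all map to lookup-table slot 0, so
 * the first query for any sibling thread fills the slot and later
 * queries for that core skip the device-tree walk entirely.
 */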
1034
1035bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
1036{
1037#ifdef CONFIG_SMP
1038	/*
1039	 * Early firmware scanning must use this rather than
1040	 * get_hard_smp_processor_id because we don't have pacas allocated
1041	 * until memory topology is discovered.
1042	 */
1043	if (cpu_to_phys_id != NULL)
1044		return (int)phys_id == cpu_to_phys_id[cpu];
1045#endif
1046
1047	return (int)phys_id == get_hard_smp_processor_id(cpu);
1048}