v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Functions for working with the Flattened Device Tree data format
   4 *
   5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
   6 * benh@kernel.crashing.org
   7 */
   8
   9#define pr_fmt(fmt)	"OF: fdt: " fmt
   10
  11#include <linux/crash_dump.h>
  12#include <linux/crc32.h>
  13#include <linux/kernel.h>
  14#include <linux/initrd.h>
  15#include <linux/memblock.h>
  16#include <linux/mutex.h>
  17#include <linux/of.h>
  18#include <linux/of_fdt.h>
  19#include <linux/sizes.h>
  20#include <linux/string.h>
  21#include <linux/errno.h>
  22#include <linux/slab.h>
  23#include <linux/libfdt.h>
  24#include <linux/debugfs.h>
  25#include <linux/serial_core.h>
  26#include <linux/sysfs.h>
  27#include <linux/random.h>
  28
  29#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
  30#include <asm/page.h>
  31
  32#include "of_private.h"
  33
  34/*
   35 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] are magically created by
  36 * cmd_wrap_S_dtb in scripts/Makefile.dtbs
  37 */
  38extern uint8_t __dtb_empty_root_begin[];
  39extern uint8_t __dtb_empty_root_end[];
  40
  41/*
  42 * of_fdt_limit_memory - limit the number of regions in the /memory node
  43 * @limit: maximum entries
  44 *
  45 * Adjust the flattened device tree to have at most 'limit' number of
  46 * memory entries in the /memory node. This function may be called
   47 * any time after initial_boot_params is set.
  48 */
  49void __init of_fdt_limit_memory(int limit)
  50{
  51	int memory;
  52	int len;
  53	const void *val;
   54	int cell_size = sizeof(uint32_t)*(dt_root_addr_cells + dt_root_size_cells);
  55
  56	memory = fdt_path_offset(initial_boot_params, "/memory");
  57	if (memory > 0) {
  58		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
  59		if (len > limit*cell_size) {
  60			len = limit*cell_size;
  61			pr_debug("Limiting number of entries to %d\n", limit);
  62			fdt_setprop(initial_boot_params, memory, "reg", val,
  63					len);
  64		}
  65	}
  66}
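/*
 * Illustration (hypothetical cell counts): with dt_root_addr_cells == 2 and
 * dt_root_size_cells == 2, each /memory "reg" entry is 16 bytes, so a "reg"
 * property describing four ranges (64 bytes) passed through
 * of_fdt_limit_memory(2) is truncated to its first 32 bytes, keeping only
 * the first two <address size> pairs.
 */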
  67
  68bool of_fdt_device_is_available(const void *blob, unsigned long node)
  69{
  70	const char *status = fdt_getprop(blob, node, "status", NULL);
  71
  72	if (!status)
  73		return true;
  74
  75	if (!strcmp(status, "ok") || !strcmp(status, "okay"))
  76		return true;
  77
  78	return false;
  79}
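/*
 * Illustration: a node with no "status" property, or with status = "okay"
 * (or the legacy "ok"), is treated as available; any other value, for
 * example status = "disabled", makes this return false.
 */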
  80
  81static void *unflatten_dt_alloc(void **mem, unsigned long size,
  82				       unsigned long align)
  83{
  84	void *res;
  85
  86	*mem = PTR_ALIGN(*mem, align);
  87	res = *mem;
  88	*mem += size;
  89
  90	return res;
  91}
  92
  93static void populate_properties(const void *blob,
  94				int offset,
  95				void **mem,
  96				struct device_node *np,
  97				const char *nodename,
  98				bool dryrun)
  99{
 100	struct property *pp, **pprev = NULL;
 101	int cur;
 102	bool has_name = false;
 103
 104	pprev = &np->properties;
 105	for (cur = fdt_first_property_offset(blob, offset);
 106	     cur >= 0;
 107	     cur = fdt_next_property_offset(blob, cur)) {
 108		const __be32 *val;
 109		const char *pname;
 110		u32 sz;
 111
 112		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
 113		if (!val) {
 114			pr_warn("Cannot locate property at 0x%x\n", cur);
 115			continue;
 116		}
 117
 118		if (!pname) {
 119			pr_warn("Cannot find property name at 0x%x\n", cur);
 120			continue;
 121		}
 122
 123		if (!strcmp(pname, "name"))
 124			has_name = true;
 125
 126		pp = unflatten_dt_alloc(mem, sizeof(struct property),
 127					__alignof__(struct property));
 128		if (dryrun)
 129			continue;
 130
 131		/* We accept flattened tree phandles either in
 132		 * ePAPR-style "phandle" properties, or the
 133		 * legacy "linux,phandle" properties.  If both
 134		 * appear and have different values, things
 135		 * will get weird. Don't do that.
 136		 */
 137		if (!strcmp(pname, "phandle") ||
 138		    !strcmp(pname, "linux,phandle")) {
 139			if (!np->phandle)
 140				np->phandle = be32_to_cpup(val);
 141		}
 142
 143		/* And we process the "ibm,phandle" property
 144		 * used in pSeries dynamic device tree
 145		 * stuff
 146		 */
 147		if (!strcmp(pname, "ibm,phandle"))
 148			np->phandle = be32_to_cpup(val);
 149
 150		pp->name   = (char *)pname;
 151		pp->length = sz;
 152		pp->value  = (__be32 *)val;
 153		*pprev     = pp;
 154		pprev      = &pp->next;
 155	}
 156
 157	/* With version 0x10 we may not have the name property,
 158	 * recreate it here from the unit name if absent
 159	 */
 160	if (!has_name) {
 161		const char *p = nodename, *ps = p, *pa = NULL;
 162		int len;
 163
 164		while (*p) {
 165			if ((*p) == '@')
 166				pa = p;
 167			else if ((*p) == '/')
 168				ps = p + 1;
 169			p++;
 170		}
 171
 172		if (pa < ps)
 173			pa = p;
 174		len = (pa - ps) + 1;
 175		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
 176					__alignof__(struct property));
 177		if (!dryrun) {
 178			pp->name   = "name";
 179			pp->length = len;
 180			pp->value  = pp + 1;
 181			*pprev     = pp;
 182			memcpy(pp->value, ps, len - 1);
 183			((char *)pp->value)[len - 1] = 0;
 184			pr_debug("fixed up name for %s -> %s\n",
 185				 nodename, (char *)pp->value);
 186		}
 187	}
 188}
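/*
 * Illustration of the name fixup above (hypothetical unit name): for
 * "serial@fe001000", ps ends up at the character after the last '/' (or the
 * start of the string) and pa at the '@', so the recreated "name" property
 * is the NUL-terminated string "serial".
 */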
 189
 190static int populate_node(const void *blob,
 191			  int offset,
 192			  void **mem,
 193			  struct device_node *dad,
 194			  struct device_node **pnp,
 195			  bool dryrun)
 196{
 197	struct device_node *np;
 198	const char *pathp;
 199	int len;
 200
 201	pathp = fdt_get_name(blob, offset, &len);
 202	if (!pathp) {
 203		*pnp = NULL;
 204		return len;
 205	}
 206
 207	len++;
 208
 209	np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
 210				__alignof__(struct device_node));
 211	if (!dryrun) {
 212		char *fn;
 213		of_node_init(np);
 214		np->full_name = fn = ((char *)np) + sizeof(*np);
 215
 216		memcpy(fn, pathp, len);
 217
 218		if (dad != NULL) {
 219			np->parent = dad;
 220			np->sibling = dad->child;
 221			dad->child = np;
 222		}
 223	}
 224
 225	populate_properties(blob, offset, mem, np, pathp, dryrun);
 226	if (!dryrun) {
 227		np->name = of_get_property(np, "name", NULL);
 228		if (!np->name)
 229			np->name = "<NULL>";
 230	}
 231
 232	*pnp = np;
 233	return 0;
 234}
 235
 236static void reverse_nodes(struct device_node *parent)
 237{
 238	struct device_node *child, *next;
 239
  240	/* Depth-first: recurse into the children first */
 241	child = parent->child;
 242	while (child) {
 243		reverse_nodes(child);
 244
 245		child = child->sibling;
 246	}
 247
 248	/* Reverse the nodes in the child list */
 249	child = parent->child;
 250	parent->child = NULL;
 251	while (child) {
 252		next = child->sibling;
 253
 254		child->sibling = parent->child;
 255		parent->child = child;
 256		child = next;
 257	}
 258}
 259
 260/**
 261 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
 262 * @blob: The parent device tree blob
 263 * @mem: Memory chunk to use for allocating device nodes and properties
 264 * @dad: Parent struct device_node
 265 * @nodepp: The device_node tree created by the call
 266 *
 267 * Return: The size of unflattened device tree or error code
 268 */
 269static int unflatten_dt_nodes(const void *blob,
 270			      void *mem,
 271			      struct device_node *dad,
 272			      struct device_node **nodepp)
 273{
 274	struct device_node *root;
 275	int offset = 0, depth = 0, initial_depth = 0;
 276#define FDT_MAX_DEPTH	64
 277	struct device_node *nps[FDT_MAX_DEPTH];
 278	void *base = mem;
 279	bool dryrun = !base;
 280	int ret;
 281
 282	if (nodepp)
 283		*nodepp = NULL;
 284
 285	/*
 286	 * We're unflattening device sub-tree if @dad is valid. There are
  287	 * possibly multiple nodes in the first level of depth. We need to
 288	 * set @depth to 1 to make fdt_next_node() happy as it bails
 289	 * immediately when negative @depth is found. Otherwise, the device
 290	 * nodes except the first one won't be unflattened successfully.
 291	 */
 292	if (dad)
 293		depth = initial_depth = 1;
 294
 295	root = dad;
 296	nps[depth] = dad;
 297
 298	for (offset = 0;
 299	     offset >= 0 && depth >= initial_depth;
 300	     offset = fdt_next_node(blob, offset, &depth)) {
 301		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
 302			continue;
 303
 304		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
 305		    !of_fdt_device_is_available(blob, offset))
 306			continue;
 307
 308		ret = populate_node(blob, offset, &mem, nps[depth],
 309				   &nps[depth+1], dryrun);
 310		if (ret < 0)
 311			return ret;
 312
 313		if (!dryrun && nodepp && !*nodepp)
 314			*nodepp = nps[depth+1];
 315		if (!dryrun && !root)
 316			root = nps[depth+1];
 317	}
 318
 319	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
 320		pr_err("Error %d processing FDT\n", offset);
 321		return -EINVAL;
 322	}
 323
 324	/*
  325	 * Reverse the child list. Some drivers assume node order matches .dts
 326	 * node order
 327	 */
 328	if (!dryrun)
 329		reverse_nodes(root);
 330
 331	return mem - base;
 332}
 333
 334/**
 335 * __unflatten_device_tree - create tree of device_nodes from flat blob
 336 * @blob: The blob to expand
 337 * @dad: Parent device node
 338 * @mynodes: The device_node tree created by the call
 339 * @dt_alloc: An allocator that provides a virtual address to memory
 340 * for the resulting tree
 341 * @detached: if true set OF_DETACHED on @mynodes
 342 *
 343 * unflattens a device-tree, creating the tree of struct device_node. It also
 344 * fills the "name" and "type" pointers of the nodes so the normal device-tree
 345 * walking functions can be used.
 346 *
 347 * Return: NULL on failure or the memory chunk containing the unflattened
 348 * device tree on success.
 349 */
 350void *__unflatten_device_tree(const void *blob,
 351			      struct device_node *dad,
 352			      struct device_node **mynodes,
 353			      void *(*dt_alloc)(u64 size, u64 align),
 354			      bool detached)
 355{
 356	int size;
 357	void *mem;
 358	int ret;
 359
 360	if (mynodes)
 361		*mynodes = NULL;
 362
 363	pr_debug(" -> unflatten_device_tree()\n");
 364
 365	if (!blob) {
 366		pr_debug("No device tree pointer\n");
 367		return NULL;
 368	}
 369
 370	pr_debug("Unflattening device tree:\n");
 371	pr_debug("magic: %08x\n", fdt_magic(blob));
 372	pr_debug("size: %08x\n", fdt_totalsize(blob));
 373	pr_debug("version: %08x\n", fdt_version(blob));
 374
 375	if (fdt_check_header(blob)) {
 376		pr_err("Invalid device tree blob header\n");
 377		return NULL;
 378	}
 379
 380	/* First pass, scan for size */
 381	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
 382	if (size <= 0)
 383		return NULL;
 384
 385	size = ALIGN(size, 4);
 386	pr_debug("  size is %d, allocating...\n", size);
 387
 388	/* Allocate memory for the expanded device tree */
 389	mem = dt_alloc(size + 4, __alignof__(struct device_node));
 390	if (!mem)
 391		return NULL;
 392
 393	memset(mem, 0, size);
 394
 395	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
 396
 397	pr_debug("  unflattening %p...\n", mem);
 398
 399	/* Second pass, do actual unflattening */
 400	ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
 401
 402	if (be32_to_cpup(mem + size) != 0xdeadbeef)
 403		pr_warn("End of tree marker overwritten: %08x\n",
 404			be32_to_cpup(mem + size));
 405
 406	if (ret <= 0)
 407		return NULL;
 408
 409	if (detached && mynodes && *mynodes) {
 410		of_node_set_flag(*mynodes, OF_DETACHED);
 411		pr_debug("unflattened tree is detached\n");
 412	}
 413
 414	pr_debug(" <- unflatten_device_tree()\n");
 415	return mem;
 416}
 417
 418static void *kernel_tree_alloc(u64 size, u64 align)
 419{
 420	return kzalloc(size, GFP_KERNEL);
 421}
 422
 423static DEFINE_MUTEX(of_fdt_unflatten_mutex);
 424
 425/**
 426 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
 427 * @blob: Flat device tree blob
 428 * @dad: Parent device node
 429 * @mynodes: The device tree created by the call
 430 *
 431 * unflattens the device-tree passed by the firmware, creating the
 432 * tree of struct device_node. It also fills the "name" and "type"
 433 * pointers of the nodes so the normal device-tree walking functions
 434 * can be used.
 435 *
 436 * Return: NULL on failure or the memory chunk containing the unflattened
 437 * device tree on success.
 438 */
 439void *of_fdt_unflatten_tree(const unsigned long *blob,
 440			    struct device_node *dad,
 441			    struct device_node **mynodes)
 442{
 443	void *mem;
 444
 445	mutex_lock(&of_fdt_unflatten_mutex);
 446	mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
 447				      true);
 448	mutex_unlock(&of_fdt_unflatten_mutex);
 449
 450	return mem;
 451}
 452EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
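/*
 * Usage sketch (hypothetical caller, error handling elided): a caller that
 * owns a complete FDT blob in memory can expand it into a detached
 * device_node tree:
 *
 *	struct device_node *root;
 *
 *	if (!of_fdt_unflatten_tree((const unsigned long *)blob, NULL, &root))
 *		return -EINVAL;
 *
 * The resulting root node (*mynodes) is marked OF_DETACHED because this
 * wrapper passes detached == true to __unflatten_device_tree().
 */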
 453
 454/* Everything below here references initial_boot_params directly. */
 455int __initdata dt_root_addr_cells;
 456int __initdata dt_root_size_cells;
 457
 458void *initial_boot_params __ro_after_init;
 459phys_addr_t initial_boot_params_pa __ro_after_init;
 460
 461#ifdef CONFIG_OF_EARLY_FLATTREE
 462
 463static u32 of_fdt_crc32;
 464
 465/*
 466 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
 467 *
 468 * This function reserves the memory occupied by an elf core header
 469 * described in the device tree. This region contains all the
  470 * information about the primary kernel's core image and is used by a dump
  471 * capture kernel to access the system memory of the primary kernel.
 472 */
 473static void __init fdt_reserve_elfcorehdr(void)
 474{
 475	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
 476		return;
 477
 478	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
 479		pr_warn("elfcorehdr is overlapped\n");
 480		return;
 481	}
 482
 483	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
 484
 485	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
 486		elfcorehdr_size >> 10, elfcorehdr_addr);
 487}
 488
 489/**
 490 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
 491 *
  492 * This function reserves memory from the early allocator for exclusive device
  493 * use, as defined in the device tree. It should be called by arch specific code
 494 * once the early allocator (i.e. memblock) has been fully activated.
 495 */
 496void __init early_init_fdt_scan_reserved_mem(void)
 497{
 498	int n;
 499	u64 base, size;
 500
 501	if (!initial_boot_params)
 502		return;
 503
 504	fdt_scan_reserved_mem();
 505	fdt_reserve_elfcorehdr();
 506
 507	/* Process header /memreserve/ fields */
 508	for (n = 0; ; n++) {
 509		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
 510		if (!size)
 511			break;
 512		memblock_reserve(base, size);
  513	}
 514}
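/*
 * Illustration: a device tree source header entry such as
 *
 *	/memreserve/ 0x80000000 0x00100000;
 *
 * ends up in the FDT memory reservation block and is picked up by the
 * fdt_get_mem_rsv() loop above as memblock_reserve(0x80000000, 0x100000)
 * (addresses here are hypothetical).
 */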
 515
 516/**
 517 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
 518 */
 519void __init early_init_fdt_reserve_self(void)
 520{
 521	if (!initial_boot_params)
 522		return;
 523
 524	/* Reserve the dtb region */
 525	memblock_reserve(__pa(initial_boot_params),
 526			 fdt_totalsize(initial_boot_params));
 527}
 528
 529/**
 530 * of_scan_flat_dt - scan flattened tree blob and call callback on each.
 531 * @it: callback function
 532 * @data: context data pointer
 533 *
  534 * This function is used to scan the flattened device tree; it is
  535 * typically used to extract the memory information at boot, before we can
  536 * unflatten the tree.
 537 */
 538int __init of_scan_flat_dt(int (*it)(unsigned long node,
 539				     const char *uname, int depth,
 540				     void *data),
 541			   void *data)
 542{
 543	const void *blob = initial_boot_params;
 544	const char *pathp;
 545	int offset, rc = 0, depth = -1;
 546
 547	if (!blob)
 548		return 0;
 549
 550	for (offset = fdt_next_node(blob, -1, &depth);
 551	     offset >= 0 && depth >= 0 && !rc;
 552	     offset = fdt_next_node(blob, offset, &depth)) {
 553
 554		pathp = fdt_get_name(blob, offset, NULL);
 555		rc = it(offset, pathp, depth, data);
 556	}
 557	return rc;
 558}
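/*
 * Usage sketch (hypothetical callback, not taken from in-tree users): a
 * caller can locate a node while walking the whole flat tree, e.g.
 *
 *	static int __init find_chosen(unsigned long node, const char *uname,
 *				      int depth, void *data)
 *	{
 *		if (depth == 1 && strcmp(uname, "chosen") == 0) {
 *			*(unsigned long *)data = node;
 *			return 1;
 *		}
 *		return 0;
 *	}
 *
 *	unsigned long chosen = 0;
 *	of_scan_flat_dt(find_chosen, &chosen);
 *
 * Returning a non-zero value from the callback terminates the walk early.
 */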
 559
 560/**
  561 * of_scan_flat_dt_subnodes - scan sub-nodes of a node, calling the callback on each.
 562 * @parent: parent node
 563 * @it: callback function
 564 * @data: context data pointer
 565 *
 566 * This function is used to scan sub-nodes of a node.
 567 */
 568int __init of_scan_flat_dt_subnodes(unsigned long parent,
 569				    int (*it)(unsigned long node,
 570					      const char *uname,
 571					      void *data),
 572				    void *data)
 573{
 574	const void *blob = initial_boot_params;
 575	int node;
 576
 577	fdt_for_each_subnode(node, blob, parent) {
 578		const char *pathp;
 579		int rc;
 580
 581		pathp = fdt_get_name(blob, node, NULL);
 582		rc = it(node, pathp, data);
 583		if (rc)
 584			return rc;
 585	}
 586	return 0;
 587}
 588
 589/**
 590 * of_get_flat_dt_subnode_by_name - get the subnode by given name
 591 *
 592 * @node: the parent node
 593 * @uname: the name of subnode
 594 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
 595 */
 596
 597int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
 598{
 599	return fdt_subnode_offset(initial_boot_params, node, uname);
 600}
 601
 602/*
 603 * of_get_flat_dt_root - find the root node in the flat blob
 604 */
 605unsigned long __init of_get_flat_dt_root(void)
 606{
 607	return 0;
 608}
 609
 610/*
 611 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
 612 *
  613 * This function can be used within the of_scan_flat_dt() callback to get
  614 * access to properties.
 615 */
 616const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
 617				       int *size)
 618{
 619	return fdt_getprop(initial_boot_params, node, name, size);
 620}
 621
 622/**
 623 * of_fdt_is_compatible - Return true if given node from the given blob has
 624 * compat in its compatible list
 625 * @blob: A device tree blob
 626 * @node: node to test
 627 * @compat: compatible string to compare with compatible list.
 628 *
 629 * Return: a non-zero value on match with smaller values returned for more
 630 * specific compatible values.
 631 */
 632static int of_fdt_is_compatible(const void *blob,
 633		      unsigned long node, const char *compat)
 634{
 635	const char *cp;
 636	int cplen;
 637	unsigned long l, score = 0;
 638
 639	cp = fdt_getprop(blob, node, "compatible", &cplen);
 640	if (cp == NULL)
 641		return 0;
 642	while (cplen > 0) {
 643		score++;
 644		if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
 645			return score;
 646		l = strlen(cp) + 1;
 647		cp += l;
 648		cplen -= l;
 649	}
 650
 651	return 0;
 652}
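/*
 * Scoring illustration (hypothetical node): for
 *
 *	compatible = "vendor,board-v2", "vendor,board", "simple-bus";
 *
 * this returns 1 for "vendor,board-v2", 2 for "vendor,board" and 3 for
 * "simple-bus", so the most specific (leftmost) entry yields the smallest
 * score; of_flat_dt_match() below keeps the lowest non-zero score.
 */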
 653
 654/**
 655 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
 656 * @node: node to test
 657 * @compat: compatible string to compare with compatible list.
 658 */
 659int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
 660{
 661	return of_fdt_is_compatible(initial_boot_params, node, compat);
 662}
 663
 664/*
 665 * of_flat_dt_match - Return true if node matches a list of compatible values
 666 */
 667static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
 668{
 669	unsigned int tmp, score = 0;
 670
 671	if (!compat)
 672		return 0;
 673
 674	while (*compat) {
 675		tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
 676		if (tmp && (score == 0 || (tmp < score)))
 677			score = tmp;
 678		compat++;
 679	}
 680
 681	return score;
 682}
 683
 684/*
 685 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
 686 */
 687uint32_t __init of_get_flat_dt_phandle(unsigned long node)
 688{
 689	return fdt_get_phandle(initial_boot_params, node);
 690}
 691
 692const char * __init of_flat_dt_get_machine_name(void)
 693{
 694	const char *name;
 695	unsigned long dt_root = of_get_flat_dt_root();
 696
 697	name = of_get_flat_dt_prop(dt_root, "model", NULL);
 698	if (!name)
 699		name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
 700	return name;
 701}
 702
 703/**
 704 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
 705 *
 706 * @default_match: A machine specific ptr to return in case of no match.
 707 * @get_next_compat: callback function to return next compatible match table.
 708 *
 709 * Iterate through machine match tables to find the best match for the machine
 710 * compatible string in the FDT.
 711 */
 712const void * __init of_flat_dt_match_machine(const void *default_match,
 713		const void * (*get_next_compat)(const char * const**))
 714{
 715	const void *data = NULL;
 716	const void *best_data = default_match;
 717	const char *const *compat;
 718	unsigned long dt_root;
 719	unsigned int best_score = ~1, score = 0;
 720
 721	dt_root = of_get_flat_dt_root();
 722	while ((data = get_next_compat(&compat))) {
 723		score = of_flat_dt_match(dt_root, compat);
 724		if (score > 0 && score < best_score) {
 725			best_data = data;
 726			best_score = score;
 727		}
 728	}
 729	if (!best_data) {
 730		const char *prop;
 731		int size;
 732
 733		pr_err("\n unrecognized device tree list:\n[ ");
 734
 735		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
 736		if (prop) {
 737			while (size > 0) {
 738				printk("'%s' ", prop);
 739				size -= strlen(prop) + 1;
 740				prop += strlen(prop) + 1;
 741			}
 742		}
 743		printk("]\n\n");
 744		return NULL;
 745	}
 746
 747	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
 748
 749	return best_data;
 750}
 751
 752static void __early_init_dt_declare_initrd(unsigned long start,
 753					   unsigned long end)
 754{
 755	/*
 756	 * __va() is not yet available this early on some platforms. In that
 757	 * case, the platform uses phys_initrd_start/phys_initrd_size instead
 758	 * and does the VA conversion itself.
 759	 */
 760	if (!IS_ENABLED(CONFIG_ARM64) &&
 761	    !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
 762		initrd_start = (unsigned long)__va(start);
 763		initrd_end = (unsigned long)__va(end);
 764		initrd_below_start_ok = 1;
 765	}
 766}
 767
 768/**
 769 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
 770 * @node: reference to node containing initrd location ('chosen')
 771 */
 772static void __init early_init_dt_check_for_initrd(unsigned long node)
 773{
 774	u64 start, end;
 775	int len;
 776	const __be32 *prop;
 777
 778	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
 779		return;
 780
 781	pr_debug("Looking for initrd properties... ");
 782
 783	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
 784	if (!prop)
 785		return;
 786	start = of_read_number(prop, len/4);
 787
 788	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
 789	if (!prop)
 790		return;
 791	end = of_read_number(prop, len/4);
 792	if (start > end)
 793		return;
 794
 795	__early_init_dt_declare_initrd(start, end);
 796	phys_initrd_start = start;
 797	phys_initrd_size = end - start;
 798
 799	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
 800}
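/*
 * Illustration (hypothetical addresses): a bootloader that loaded an initrd
 * at 0x82000000..0x82800000 could describe it as
 *
 *	chosen {
 *		linux,initrd-start = <0x0 0x82000000>;
 *		linux,initrd-end = <0x0 0x82800000>;
 *	};
 *
 * The cell count is derived from the property length (len/4), so one-cell
 * and two-cell encodings are both accepted.
 */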
 801
 802/**
 803 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
 804 * tree
 805 * @node: reference to node containing elfcorehdr location ('chosen')
 806 */
 807static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
 808{
 809	const __be32 *prop;
 810	int len;
 811
 812	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
 813		return;
 814
 815	pr_debug("Looking for elfcorehdr property... ");
 816
 817	prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
 818	if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
 819		return;
 820
 821	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
 822	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
 823
 824	pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
 825		 elfcorehdr_addr, elfcorehdr_size);
 826}
 827
 828static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
 829
 830/*
  831 * The main usage of linux,usable-memory-range is for the crash dump kernel.
  832 * Originally there was only one usable-memory region; now there may
  833 * be two regions, a low region and a high region.
  834 * To stay compatible with existing user-space and older kdump, the low
  835 * region is always the last range of linux,usable-memory-range, if it exists.
 836 */
 837#define MAX_USABLE_RANGES		2
 838
 839/**
 840 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
 841 * location from flat tree
 842 */
 843void __init early_init_dt_check_for_usable_mem_range(void)
 844{
 845	struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
 846	const __be32 *prop, *endp;
 847	int len, i;
 848	unsigned long node = chosen_node_offset;
 849
 850	if ((long)node < 0)
 851		return;
 852
 853	pr_debug("Looking for usable-memory-range property... ");
 854
 855	prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
 856	if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
 857		return;
 858
 859	endp = prop + (len / sizeof(__be32));
 860	for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
 861		rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
 862		rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
 863
 864		pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
 865			 i, &rgn[i].base, &rgn[i].size);
 866	}
 867
 868	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
 869	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
 870		memblock_add(rgn[i].base, rgn[i].size);
 871}
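/*
 * Illustration (hypothetical crash-kernel layout, 2 address + 2 size cells):
 *
 *	linux,usable-memory-range = <0x8 0x80000000 0x0 0x10000000
 *				     0x0 0x9f000000 0x0 0x01000000>;
 *
 * caps system memory to the first (high) range and then adds the second
 * (low) range back with memblock_add(), matching the ordering convention
 * described above.
 */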
 872
 873#ifdef CONFIG_SERIAL_EARLYCON
 874
 875int __init early_init_dt_scan_chosen_stdout(void)
 876{
 877	int offset;
 878	const char *p, *q, *options = NULL;
 879	int l;
 880	const struct earlycon_id *match;
 881	const void *fdt = initial_boot_params;
 882	int ret;
 883
 884	offset = fdt_path_offset(fdt, "/chosen");
 885	if (offset < 0)
 886		offset = fdt_path_offset(fdt, "/chosen@0");
 887	if (offset < 0)
 888		return -ENOENT;
 889
 890	p = fdt_getprop(fdt, offset, "stdout-path", &l);
 891	if (!p)
 892		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
 893	if (!p || !l)
 894		return -ENOENT;
 895
 896	q = strchrnul(p, ':');
 897	if (*q != '\0')
 898		options = q + 1;
 899	l = q - p;
 900
 901	/* Get the node specified by stdout-path */
 902	offset = fdt_path_offset_namelen(fdt, p, l);
 903	if (offset < 0) {
 904		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
 905		return 0;
 906	}
 907
 908	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
 909		if (!match->compatible[0])
 910			continue;
 911
 912		if (fdt_node_check_compatible(fdt, offset, match->compatible))
 913			continue;
 914
 915		ret = of_setup_earlycon(match, offset, options);
 916		if (!ret || ret == -EALREADY)
 917			return 0;
 918	}
 919	return -ENODEV;
 920}
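/*
 * Illustration (hypothetical paths): the parser above handles both
 *
 *	chosen {
 *		stdout-path = "serial0:115200n8";
 *	};
 *
 * and a full path such as "/soc/serial@fe001000:115200n8". Everything after
 * the first ':' is passed to of_setup_earlycon() as the options string, and
 * fdt_path_offset_namelen() resolves an alias like "serial0" to the real
 * node.
 */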
 921#endif
 922
 923/*
 924 * early_init_dt_scan_root - fetch the top level address and size cells
 925 */
 926int __init early_init_dt_scan_root(void)
 927{
 928	const __be32 *prop;
 929	const void *fdt = initial_boot_params;
 930	int node = fdt_path_offset(fdt, "/");
 931
 932	if (node < 0)
 933		return -ENODEV;
 934
 935	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
 936	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
 937
 938	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
 939	if (!WARN(!prop, "No '#size-cells' in root node\n"))
 940		dt_root_size_cells = be32_to_cpup(prop);
 941	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
 942
 943	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
 944	if (!WARN(!prop, "No '#address-cells' in root node\n"))
 945		dt_root_addr_cells = be32_to_cpup(prop);
 946	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
 947
 948	return 0;
 949}
 950
 951u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
 952{
 953	const __be32 *p = *cellp;
 954
 955	*cellp = p + s;
 956	return of_read_number(p, s);
 957}
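/*
 * Illustration: with s == 2 and *cellp pointing at the big-endian cells
 * { 0x00000001, 0x23456789 }, this returns 0x123456789 and advances *cellp
 * by two cells, ready for the next address or size field.
 */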
 958
 959/*
 960 * early_init_dt_scan_memory - Look for and parse memory nodes
 961 */
 962int __init early_init_dt_scan_memory(void)
 963{
 964	int node, found_memory = 0;
 965	const void *fdt = initial_boot_params;
 966
 967	fdt_for_each_subnode(node, fdt, 0) {
 968		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 969		const __be32 *reg, *endp;
 970		int l;
 971		bool hotpluggable;
 972
 973		/* We are scanning "memory" nodes only */
 974		if (type == NULL || strcmp(type, "memory") != 0)
 975			continue;
 976
 977		if (!of_fdt_device_is_available(fdt, node))
 978			continue;
 979
 980		reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
 981		if (reg == NULL)
 982			reg = of_get_flat_dt_prop(node, "reg", &l);
 983		if (reg == NULL)
 984			continue;
 985
 986		endp = reg + (l / sizeof(__be32));
 987		hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
 988
 989		pr_debug("memory scan node %s, reg size %d,\n",
 990			 fdt_get_name(fdt, node, NULL), l);
 991
 992		while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
 993			u64 base, size;
 994
 995			base = dt_mem_next_cell(dt_root_addr_cells, &reg);
 996			size = dt_mem_next_cell(dt_root_size_cells, &reg);
 997
 998			if (size == 0)
 999				continue;
1000			pr_debug(" - %llx, %llx\n", base, size);
1001
1002			early_init_dt_add_memory_arch(base, size);
1003
1004			found_memory = 1;
1005
1006			if (!hotpluggable)
1007				continue;
1008
1009			if (memblock_mark_hotplug(base, size))
1010				pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
1011					base, base + size);
1012		}
1013	}
1014	return found_memory;
1015}
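/*
 * Illustration (hypothetical node, 2 address + 2 size cells):
 *
 *	memory@80000000 {
 *		device_type = "memory";
 *		reg = <0x0 0x80000000 0x0 0x40000000>;
 *	};
 *
 * is matched by the scan above and results in
 * early_init_dt_add_memory_arch(0x80000000, 0x40000000); adding a
 * "hotpluggable" property would also mark the range via
 * memblock_mark_hotplug().
 */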
1016
1017int __init early_init_dt_scan_chosen(char *cmdline)
1018{
1019	int l, node;
1020	const char *p;
1021	const void *rng_seed;
1022	const void *fdt = initial_boot_params;
1023
1024	node = fdt_path_offset(fdt, "/chosen");
1025	if (node < 0)
1026		node = fdt_path_offset(fdt, "/chosen@0");
1027	if (node < 0)
1028		/* Handle the cmdline config options even if no /chosen node */
1029		goto handle_cmdline;
1030
1031	chosen_node_offset = node;
1032
1033	early_init_dt_check_for_initrd(node);
1034	early_init_dt_check_for_elfcorehdr(node);
1035
1036	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
1037	if (rng_seed && l > 0) {
1038		add_bootloader_randomness(rng_seed, l);
1039
1040		/* try to clear seed so it won't be found. */
1041		fdt_nop_property(initial_boot_params, node, "rng-seed");
1042
1043		/* update CRC check value */
1044		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1045				fdt_totalsize(initial_boot_params));
1046	}
1047
1048	/* Retrieve command line */
1049	p = of_get_flat_dt_prop(node, "bootargs", &l);
1050	if (p != NULL && l > 0)
1051		strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
1052
1053handle_cmdline:
1054	/*
1055	 * CONFIG_CMDLINE is meant to be a default in case nothing else
1056	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
1057	 * is set in which case we override whatever was found earlier.
1058	 */
1059#ifdef CONFIG_CMDLINE
1060#if defined(CONFIG_CMDLINE_EXTEND)
1061	strlcat(cmdline, " ", COMMAND_LINE_SIZE);
1062	strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1063#elif defined(CONFIG_CMDLINE_FORCE)
1064	strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1065#else
 1066	/* No arguments from boot loader, use kernel's cmdline */
1067	if (!((char *)cmdline)[0])
1068		strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1069#endif
1070#endif /* CONFIG_CMDLINE */
1071
1072	pr_debug("Command line is: %s\n", (char *)cmdline);
1073
1074	return 0;
1075}
1076
1077#ifndef MIN_MEMBLOCK_ADDR
1078#define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
1079#endif
1080#ifndef MAX_MEMBLOCK_ADDR
1081#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
1082#endif
1083
1084void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1085{
1086	const u64 phys_offset = MIN_MEMBLOCK_ADDR;
1087
1088	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1089		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1090			base, base + size);
1091		return;
1092	}
1093
1094	if (!PAGE_ALIGNED(base)) {
1095		size -= PAGE_SIZE - (base & ~PAGE_MASK);
1096		base = PAGE_ALIGN(base);
1097	}
1098	size &= PAGE_MASK;
1099
1100	if (base > MAX_MEMBLOCK_ADDR) {
1101		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1102			base, base + size);
1103		return;
1104	}
1105
1106	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1107		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1108			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1109		size = MAX_MEMBLOCK_ADDR - base + 1;
1110	}
1111
1112	if (base + size < phys_offset) {
1113		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1114			base, base + size);
1115		return;
1116	}
1117	if (base < phys_offset) {
1118		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1119			base, phys_offset);
1120		size -= phys_offset - base;
1121		base = phys_offset;
1122	}
1123	memblock_add(base, size);
1124}
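/*
 * Clamping illustration (hypothetical values, 4K pages): with
 * MIN_MEMBLOCK_ADDR (phys_offset) at 0x80000000, a reported block with
 * base 0x7ff00000 and size 0x200000 is trimmed by the final check above to
 * base 0x80000000, size 0x100000 before being passed to memblock_add().
 */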
1125
1126static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1127{
1128	void *ptr = memblock_alloc(size, align);
1129
1130	if (!ptr)
1131		panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
1132		      __func__, size, align);
1133
1134	return ptr;
1135}
1136
1137bool __init early_init_dt_verify(void *dt_virt, phys_addr_t dt_phys)
1138{
1139	if (!dt_virt)
1140		return false;
1141
1142	/* check device tree validity */
1143	if (fdt_check_header(dt_virt))
1144		return false;
1145
1146	/* Setup flat device-tree pointer */
1147	initial_boot_params = dt_virt;
1148	initial_boot_params_pa = dt_phys;
1149	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1150				fdt_totalsize(initial_boot_params));
1151
1152	/* Initialize {size,address}-cells info */
1153	early_init_dt_scan_root();
1154
1155	return true;
1156}
1157
1158
1159void __init early_init_dt_scan_nodes(void)
1160{
1161	int rc;
 1162
1163	/* Retrieve various information from the /chosen node */
1164	rc = early_init_dt_scan_chosen(boot_command_line);
1165	if (rc)
1166		pr_warn("No chosen node found, continuing without\n");
1167
1168	/* Setup memory, calling early_init_dt_add_memory_arch */
1169	early_init_dt_scan_memory();
1170
1171	/* Handle linux,usable-memory-range property */
1172	early_init_dt_check_for_usable_mem_range();
1173}
1174
1175bool __init early_init_dt_scan(void *dt_virt, phys_addr_t dt_phys)
1176{
1177	bool status;
1178
1179	status = early_init_dt_verify(dt_virt, dt_phys);
1180	if (!status)
1181		return false;
1182
1183	early_init_dt_scan_nodes();
1184	return true;
1185}
1186
1187static void *__init copy_device_tree(void *fdt)
1188{
1189	int size;
1190	void *dt;
1191
1192	size = fdt_totalsize(fdt);
1193	dt = early_init_dt_alloc_memory_arch(size,
1194					     roundup_pow_of_two(FDT_V17_SIZE));
1195
1196	if (dt)
1197		memcpy(dt, fdt, size);
1198
1199	return dt;
1200}
1201
1202/**
1203 * unflatten_device_tree - create tree of device_nodes from flat blob
1204 *
1205 * unflattens the device-tree passed by the firmware, creating the
1206 * tree of struct device_node. It also fills the "name" and "type"
1207 * pointers of the nodes so the normal device-tree walking functions
1208 * can be used.
1209 */
1210void __init unflatten_device_tree(void)
1211{
1212	void *fdt = initial_boot_params;
1213
1214	/* Save the statically-placed regions in the reserved_mem array */
 1215	fdt_scan_reserved_mem_reg_nodes();
1216
 1217	/* Populate an empty root node when bootloader doesn't provide one */
1218	if (!fdt) {
1219		fdt = (void *) __dtb_empty_root_begin;
1220		/* fdt_totalsize() will be used for copy size */
1221		if (fdt_totalsize(fdt) >
1222		    __dtb_empty_root_end - __dtb_empty_root_begin) {
1223			pr_err("invalid size in dtb_empty_root\n");
1224			return;
1225		}
1226		of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
1227		fdt = copy_device_tree(fdt);
1228	}
1229
1230	__unflatten_device_tree(fdt, NULL, &of_root,
1231				early_init_dt_alloc_memory_arch, false);
1232
1233	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
1234	of_alias_scan(early_init_dt_alloc_memory_arch);
1235
1236	unittest_unflatten_overlay_base();
1237}
1238
1239/**
1240 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
1241 *
1242 * Copies and unflattens the device-tree passed by the firmware, creating the
1243 * tree of struct device_node. It also fills the "name" and "type"
1244 * pointers of the nodes so the normal device-tree walking functions
1245 * can be used. This should only be used when the FDT memory has not been
1246 * reserved such is the case when the FDT is built-in to the kernel init
1247 * section. If the FDT memory is reserved already then unflatten_device_tree
1248 * should be used instead.
1249 */
1250void __init unflatten_and_copy_device_tree(void)
1251{
1252	if (initial_boot_params)
1253		initial_boot_params = copy_device_tree(initial_boot_params);
1254
1255	unflatten_device_tree();
1256}
1257
1258#ifdef CONFIG_SYSFS
1259static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
1260			       struct bin_attribute *bin_attr,
1261			       char *buf, loff_t off, size_t count)
1262{
1263	memcpy(buf, initial_boot_params + off, count);
1264	return count;
1265}
1266
1267static int __init of_fdt_raw_init(void)
1268{
1269	static struct bin_attribute of_fdt_raw_attr =
1270		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
1271
1272	if (!initial_boot_params)
1273		return 0;
1274
1275	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
1276				     fdt_totalsize(initial_boot_params))) {
1277		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
1278		return 0;
1279	}
1280	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
1281	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
1282}
1283late_initcall(of_fdt_raw_init);
1284#endif
1285
1286#endif /* CONFIG_OF_EARLY_FLATTREE */
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Functions for working with the Flattened Device Tree data format
   4 *
   5 * Copyright 2009 Benjamin Herrenschmidt, IBM Corp
   6 * benh@kernel.crashing.org
   7 */
   8
   9#define pr_fmt(fmt)	"OF: fdt: " fmt
  10
  11#include <linux/acpi.h>
  12#include <linux/crash_dump.h>
  13#include <linux/crc32.h>
  14#include <linux/kernel.h>
  15#include <linux/initrd.h>
  16#include <linux/memblock.h>
  17#include <linux/mutex.h>
  18#include <linux/of.h>
  19#include <linux/of_fdt.h>
  20#include <linux/sizes.h>
  21#include <linux/string.h>
  22#include <linux/errno.h>
  23#include <linux/slab.h>
  24#include <linux/libfdt.h>
  25#include <linux/debugfs.h>
  26#include <linux/serial_core.h>
  27#include <linux/sysfs.h>
  28#include <linux/random.h>
  29
  30#include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
  31#include <asm/page.h>
  32
  33#include "of_private.h"
  34
  35/*
  36 * __dtb_empty_root_begin[] and __dtb_empty_root_end[] magically created by
  37 * cmd_dt_S_dtb in scripts/Makefile.lib
  38 */
  39extern uint8_t __dtb_empty_root_begin[];
  40extern uint8_t __dtb_empty_root_end[];
  41
  42/*
  43 * of_fdt_limit_memory - limit the number of regions in the /memory node
  44 * @limit: maximum entries
  45 *
  46 * Adjust the flattened device tree to have at most 'limit' number of
  47 * memory entries in the /memory node. This function may be called
  48 * any time after initial_boot_param is set.
  49 */
  50void __init of_fdt_limit_memory(int limit)
  51{
  52	int memory;
  53	int len;
  54	const void *val;
  55	int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
  56	int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
  57	const __be32 *addr_prop;
  58	const __be32 *size_prop;
  59	int root_offset;
  60	int cell_size;
  61
  62	root_offset = fdt_path_offset(initial_boot_params, "/");
  63	if (root_offset < 0)
  64		return;
  65
  66	addr_prop = fdt_getprop(initial_boot_params, root_offset,
  67				"#address-cells", NULL);
  68	if (addr_prop)
  69		nr_address_cells = fdt32_to_cpu(*addr_prop);
  70
  71	size_prop = fdt_getprop(initial_boot_params, root_offset,
  72				"#size-cells", NULL);
  73	if (size_prop)
  74		nr_size_cells = fdt32_to_cpu(*size_prop);
  75
  76	cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
  77
  78	memory = fdt_path_offset(initial_boot_params, "/memory");
  79	if (memory > 0) {
  80		val = fdt_getprop(initial_boot_params, memory, "reg", &len);
  81		if (len > limit*cell_size) {
  82			len = limit*cell_size;
  83			pr_debug("Limiting number of entries to %d\n", limit);
  84			fdt_setprop(initial_boot_params, memory, "reg", val,
  85					len);
  86		}
  87	}
  88}
  89
  90bool of_fdt_device_is_available(const void *blob, unsigned long node)
  91{
  92	const char *status = fdt_getprop(blob, node, "status", NULL);
  93
  94	if (!status)
  95		return true;
  96
  97	if (!strcmp(status, "ok") || !strcmp(status, "okay"))
  98		return true;
  99
 100	return false;
 101}
 102
 103static void *unflatten_dt_alloc(void **mem, unsigned long size,
 104				       unsigned long align)
 105{
 106	void *res;
 107
 108	*mem = PTR_ALIGN(*mem, align);
 109	res = *mem;
 110	*mem += size;
 111
 112	return res;
 113}
 114
 115static void populate_properties(const void *blob,
 116				int offset,
 117				void **mem,
 118				struct device_node *np,
 119				const char *nodename,
 120				bool dryrun)
 121{
 122	struct property *pp, **pprev = NULL;
 123	int cur;
 124	bool has_name = false;
 125
 126	pprev = &np->properties;
 127	for (cur = fdt_first_property_offset(blob, offset);
 128	     cur >= 0;
 129	     cur = fdt_next_property_offset(blob, cur)) {
 130		const __be32 *val;
 131		const char *pname;
 132		u32 sz;
 133
 134		val = fdt_getprop_by_offset(blob, cur, &pname, &sz);
 135		if (!val) {
 136			pr_warn("Cannot locate property at 0x%x\n", cur);
 137			continue;
 138		}
 139
 140		if (!pname) {
 141			pr_warn("Cannot find property name at 0x%x\n", cur);
 142			continue;
 143		}
 144
 145		if (!strcmp(pname, "name"))
 146			has_name = true;
 147
 148		pp = unflatten_dt_alloc(mem, sizeof(struct property),
 149					__alignof__(struct property));
 150		if (dryrun)
 151			continue;
 152
 153		/* We accept flattened tree phandles either in
 154		 * ePAPR-style "phandle" properties, or the
 155		 * legacy "linux,phandle" properties.  If both
 156		 * appear and have different values, things
 157		 * will get weird. Don't do that.
 158		 */
 159		if (!strcmp(pname, "phandle") ||
 160		    !strcmp(pname, "linux,phandle")) {
 161			if (!np->phandle)
 162				np->phandle = be32_to_cpup(val);
 163		}
 164
 165		/* And we process the "ibm,phandle" property
 166		 * used in pSeries dynamic device tree
 167		 * stuff
 168		 */
 169		if (!strcmp(pname, "ibm,phandle"))
 170			np->phandle = be32_to_cpup(val);
 171
 172		pp->name   = (char *)pname;
 173		pp->length = sz;
 174		pp->value  = (__be32 *)val;
 175		*pprev     = pp;
 176		pprev      = &pp->next;
 177	}
 178
 179	/* With version 0x10 we may not have the name property,
 180	 * recreate it here from the unit name if absent
 181	 */
 182	if (!has_name) {
 183		const char *p = nodename, *ps = p, *pa = NULL;
 184		int len;
 185
 186		while (*p) {
 187			if ((*p) == '@')
 188				pa = p;
 189			else if ((*p) == '/')
 190				ps = p + 1;
 191			p++;
 192		}
 193
 194		if (pa < ps)
 195			pa = p;
 196		len = (pa - ps) + 1;
 197		pp = unflatten_dt_alloc(mem, sizeof(struct property) + len,
 198					__alignof__(struct property));
 199		if (!dryrun) {
 200			pp->name   = "name";
 201			pp->length = len;
 202			pp->value  = pp + 1;
 203			*pprev     = pp;
 204			memcpy(pp->value, ps, len - 1);
 205			((char *)pp->value)[len - 1] = 0;
 206			pr_debug("fixed up name for %s -> %s\n",
 207				 nodename, (char *)pp->value);
 208		}
 209	}
 210}
 211
 212static int populate_node(const void *blob,
 213			  int offset,
 214			  void **mem,
 215			  struct device_node *dad,
 216			  struct device_node **pnp,
 217			  bool dryrun)
 218{
 219	struct device_node *np;
 220	const char *pathp;
 221	int len;
 222
 223	pathp = fdt_get_name(blob, offset, &len);
 224	if (!pathp) {
 225		*pnp = NULL;
 226		return len;
 227	}
 228
 229	len++;
 230
 231	np = unflatten_dt_alloc(mem, sizeof(struct device_node) + len,
 232				__alignof__(struct device_node));
 233	if (!dryrun) {
 234		char *fn;
 235		of_node_init(np);
 236		np->full_name = fn = ((char *)np) + sizeof(*np);
 237
 238		memcpy(fn, pathp, len);
 239
 240		if (dad != NULL) {
 241			np->parent = dad;
 242			np->sibling = dad->child;
 243			dad->child = np;
 244		}
 245	}
 246
 247	populate_properties(blob, offset, mem, np, pathp, dryrun);
 248	if (!dryrun) {
 249		np->name = of_get_property(np, "name", NULL);
 250		if (!np->name)
 251			np->name = "<NULL>";
 252	}
 253
 254	*pnp = np;
 255	return 0;
 256}
 257
 258static void reverse_nodes(struct device_node *parent)
 259{
 260	struct device_node *child, *next;
 261
 262	/* In-depth first */
 263	child = parent->child;
 264	while (child) {
 265		reverse_nodes(child);
 266
 267		child = child->sibling;
 268	}
 269
 270	/* Reverse the nodes in the child list */
 271	child = parent->child;
 272	parent->child = NULL;
 273	while (child) {
 274		next = child->sibling;
 275
 276		child->sibling = parent->child;
 277		parent->child = child;
 278		child = next;
 279	}
 280}
 281
 282/**
 283 * unflatten_dt_nodes - Alloc and populate a device_node from the flat tree
 284 * @blob: The parent device tree blob
 285 * @mem: Memory chunk to use for allocating device nodes and properties
 286 * @dad: Parent struct device_node
 287 * @nodepp: The device_node tree created by the call
 288 *
 289 * Return: The size of unflattened device tree or error code
 290 */
 291static int unflatten_dt_nodes(const void *blob,
 292			      void *mem,
 293			      struct device_node *dad,
 294			      struct device_node **nodepp)
 295{
 296	struct device_node *root;
 297	int offset = 0, depth = 0, initial_depth = 0;
 298#define FDT_MAX_DEPTH	64
 299	struct device_node *nps[FDT_MAX_DEPTH];
 300	void *base = mem;
 301	bool dryrun = !base;
 302	int ret;
 303
 304	if (nodepp)
 305		*nodepp = NULL;
 306
 307	/*
 308	 * We're unflattening device sub-tree if @dad is valid. There are
 309	 * possibly multiple nodes in the first level of depth. We need
 310	 * set @depth to 1 to make fdt_next_node() happy as it bails
 311	 * immediately when negative @depth is found. Otherwise, the device
 312	 * nodes except the first one won't be unflattened successfully.
 313	 */
 314	if (dad)
 315		depth = initial_depth = 1;
 316
 317	root = dad;
 318	nps[depth] = dad;
 319
 320	for (offset = 0;
 321	     offset >= 0 && depth >= initial_depth;
 322	     offset = fdt_next_node(blob, offset, &depth)) {
 323		if (WARN_ON_ONCE(depth >= FDT_MAX_DEPTH - 1))
 324			continue;
 325
 326		if (!IS_ENABLED(CONFIG_OF_KOBJ) &&
 327		    !of_fdt_device_is_available(blob, offset))
 328			continue;
 329
 330		ret = populate_node(blob, offset, &mem, nps[depth],
 331				   &nps[depth+1], dryrun);
 332		if (ret < 0)
 333			return ret;
 334
 335		if (!dryrun && nodepp && !*nodepp)
 336			*nodepp = nps[depth+1];
 337		if (!dryrun && !root)
 338			root = nps[depth+1];
 339	}
 340
 341	if (offset < 0 && offset != -FDT_ERR_NOTFOUND) {
 342		pr_err("Error %d processing FDT\n", offset);
 343		return -EINVAL;
 344	}
 345
 346	/*
 347	 * Reverse the child list. Some drivers assumes node order matches .dts
 348	 * node order
 349	 */
 350	if (!dryrun)
 351		reverse_nodes(root);
 352
 353	return mem - base;
 354}
 355
 356/**
 357 * __unflatten_device_tree - create tree of device_nodes from flat blob
 358 * @blob: The blob to expand
 359 * @dad: Parent device node
 360 * @mynodes: The device_node tree created by the call
 361 * @dt_alloc: An allocator that provides a virtual address to memory
 362 * for the resulting tree
 363 * @detached: if true set OF_DETACHED on @mynodes
 364 *
 365 * unflattens a device-tree, creating the tree of struct device_node. It also
 366 * fills the "name" and "type" pointers of the nodes so the normal device-tree
 367 * walking functions can be used.
 368 *
 369 * Return: NULL on failure or the memory chunk containing the unflattened
 370 * device tree on success.
 371 */
 372void *__unflatten_device_tree(const void *blob,
 373			      struct device_node *dad,
 374			      struct device_node **mynodes,
 375			      void *(*dt_alloc)(u64 size, u64 align),
 376			      bool detached)
 377{
 378	int size;
 379	void *mem;
 380	int ret;
 381
 382	if (mynodes)
 383		*mynodes = NULL;
 384
 385	pr_debug(" -> unflatten_device_tree()\n");
 386
 387	if (!blob) {
 388		pr_debug("No device tree pointer\n");
 389		return NULL;
 390	}
 391
 392	pr_debug("Unflattening device tree:\n");
 393	pr_debug("magic: %08x\n", fdt_magic(blob));
 394	pr_debug("size: %08x\n", fdt_totalsize(blob));
 395	pr_debug("version: %08x\n", fdt_version(blob));
 396
 397	if (fdt_check_header(blob)) {
 398		pr_err("Invalid device tree blob header\n");
 399		return NULL;
 400	}
 401
 402	/* First pass, scan for size */
 403	size = unflatten_dt_nodes(blob, NULL, dad, NULL);
 404	if (size <= 0)
 405		return NULL;
 406
 407	size = ALIGN(size, 4);
 408	pr_debug("  size is %d, allocating...\n", size);
 409
 410	/* Allocate memory for the expanded device tree */
 411	mem = dt_alloc(size + 4, __alignof__(struct device_node));
 412	if (!mem)
 413		return NULL;
 414
 415	memset(mem, 0, size);
 416
 417	*(__be32 *)(mem + size) = cpu_to_be32(0xdeadbeef);
 418
 419	pr_debug("  unflattening %p...\n", mem);
 420
 421	/* Second pass, do actual unflattening */
 422	ret = unflatten_dt_nodes(blob, mem, dad, mynodes);
 423
 424	if (be32_to_cpup(mem + size) != 0xdeadbeef)
 425		pr_warn("End of tree marker overwritten: %08x\n",
 426			be32_to_cpup(mem + size));
 427
 428	if (ret <= 0)
 429		return NULL;
 430
 431	if (detached && mynodes && *mynodes) {
 432		of_node_set_flag(*mynodes, OF_DETACHED);
 433		pr_debug("unflattened tree is detached\n");
 434	}
 435
 436	pr_debug(" <- unflatten_device_tree()\n");
 437	return mem;
 438}
 439
 440static void *kernel_tree_alloc(u64 size, u64 align)
 441{
 442	return kzalloc(size, GFP_KERNEL);
 443}
 444
 445static DEFINE_MUTEX(of_fdt_unflatten_mutex);
 446
 447/**
 448 * of_fdt_unflatten_tree - create tree of device_nodes from flat blob
 449 * @blob: Flat device tree blob
 450 * @dad: Parent device node
 451 * @mynodes: The device tree created by the call
 452 *
 453 * unflattens the device-tree passed by the firmware, creating the
 454 * tree of struct device_node. It also fills the "name" and "type"
 455 * pointers of the nodes so the normal device-tree walking functions
 456 * can be used.
 457 *
 458 * Return: NULL on failure or the memory chunk containing the unflattened
 459 * device tree on success.
 460 */
 461void *of_fdt_unflatten_tree(const unsigned long *blob,
 462			    struct device_node *dad,
 463			    struct device_node **mynodes)
 464{
 465	void *mem;
 466
 467	mutex_lock(&of_fdt_unflatten_mutex);
 468	mem = __unflatten_device_tree(blob, dad, mynodes, &kernel_tree_alloc,
 469				      true);
 470	mutex_unlock(&of_fdt_unflatten_mutex);
 471
 472	return mem;
 473}
 474EXPORT_SYMBOL_GPL(of_fdt_unflatten_tree);
 475
 476/* Everything below here references initial_boot_params directly. */
 477int __initdata dt_root_addr_cells;
 478int __initdata dt_root_size_cells;
 479
 480void *initial_boot_params __ro_after_init;
 
 481
 482#ifdef CONFIG_OF_EARLY_FLATTREE
 483
 484static u32 of_fdt_crc32;
 485
 486/*
 487 * fdt_reserve_elfcorehdr() - reserves memory for elf core header
 488 *
 489 * This function reserves the memory occupied by an elf core header
 490 * described in the device tree. This region contains all the
 491 * information about primary kernel's core image and is used by a dump
 492 * capture kernel to access the system memory on primary kernel.
 493 */
 494static void __init fdt_reserve_elfcorehdr(void)
 495{
 496	if (!IS_ENABLED(CONFIG_CRASH_DUMP) || !elfcorehdr_size)
 497		return;
 498
 499	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
 500		pr_warn("elfcorehdr is overlapped\n");
 501		return;
 502	}
 503
 504	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
 505
 506	pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
 507		elfcorehdr_size >> 10, elfcorehdr_addr);
 508}
 509
 510/**
 511 * early_init_fdt_scan_reserved_mem() - create reserved memory regions
 512 *
 513 * This function grabs memory from early allocator for device exclusive use
 514 * defined in device tree structures. It should be called by arch specific code
 515 * once the early allocator (i.e. memblock) has been fully activated.
 516 */
 517void __init early_init_fdt_scan_reserved_mem(void)
 518{
 519	int n;
 520	u64 base, size;
 521
 522	if (!initial_boot_params)
 523		return;
 524
 525	fdt_scan_reserved_mem();
 526	fdt_reserve_elfcorehdr();
 527
 528	/* Process header /memreserve/ fields */
 529	for (n = 0; ; n++) {
 530		fdt_get_mem_rsv(initial_boot_params, n, &base, &size);
 531		if (!size)
 532			break;
 533		memblock_reserve(base, size);
 534	}
 535
 536	fdt_init_reserved_mem();
 537}
 538
 539/**
 540 * early_init_fdt_reserve_self() - reserve the memory used by the FDT blob
 541 */
 542void __init early_init_fdt_reserve_self(void)
 543{
 544	if (!initial_boot_params)
 545		return;
 546
 547	/* Reserve the dtb region */
 548	memblock_reserve(__pa(initial_boot_params),
 549			 fdt_totalsize(initial_boot_params));
 550}
 551
 552/**
 553 * of_scan_flat_dt - scan flattened tree blob and call callback on each.
 554 * @it: callback function
 555 * @data: context data pointer
 556 *
 557 * This function is used to scan the flattened device-tree, it is
 558 * used to extract the memory information at boot before we can
 559 * unflatten the tree
 560 */
 561int __init of_scan_flat_dt(int (*it)(unsigned long node,
 562				     const char *uname, int depth,
 563				     void *data),
 564			   void *data)
 565{
 566	const void *blob = initial_boot_params;
 567	const char *pathp;
 568	int offset, rc = 0, depth = -1;
 569
 570	if (!blob)
 571		return 0;
 572
 573	for (offset = fdt_next_node(blob, -1, &depth);
 574	     offset >= 0 && depth >= 0 && !rc;
 575	     offset = fdt_next_node(blob, offset, &depth)) {
 576
 577		pathp = fdt_get_name(blob, offset, NULL);
 578		rc = it(offset, pathp, depth, data);
 579	}
 580	return rc;
 581}
 582
 583/**
 584 * of_scan_flat_dt_subnodes - scan sub-nodes of a node call callback on each.
 585 * @parent: parent node
 586 * @it: callback function
 587 * @data: context data pointer
 588 *
 589 * This function is used to scan sub-nodes of a node.
 590 */
 591int __init of_scan_flat_dt_subnodes(unsigned long parent,
 592				    int (*it)(unsigned long node,
 593					      const char *uname,
 594					      void *data),
 595				    void *data)
 596{
 597	const void *blob = initial_boot_params;
 598	int node;
 599
 600	fdt_for_each_subnode(node, blob, parent) {
 601		const char *pathp;
 602		int rc;
 603
 604		pathp = fdt_get_name(blob, node, NULL);
 605		rc = it(node, pathp, data);
 606		if (rc)
 607			return rc;
 608	}
 609	return 0;
 610}
 611
 612/**
 613 * of_get_flat_dt_subnode_by_name - get the subnode by given name
 614 *
 615 * @node: the parent node
 616 * @uname: the name of subnode
 617 * @return offset of the subnode, or -FDT_ERR_NOTFOUND if there is none
 618 */
 619
 620int __init of_get_flat_dt_subnode_by_name(unsigned long node, const char *uname)
 621{
 622	return fdt_subnode_offset(initial_boot_params, node, uname);
 623}
 624
 625/*
 626 * of_get_flat_dt_root - find the root node in the flat blob
 627 */
 628unsigned long __init of_get_flat_dt_root(void)
 629{
 630	return 0;
 631}
 632
 633/*
 634 * of_get_flat_dt_prop - Given a node in the flat blob, return the property ptr
 635 *
 636 * This function can be used within scan_flattened_dt callback to get
 637 * access to properties
 638 */
 639const void *__init of_get_flat_dt_prop(unsigned long node, const char *name,
 640				       int *size)
 641{
 642	return fdt_getprop(initial_boot_params, node, name, size);
 643}
 644
 645/**
 646 * of_fdt_is_compatible - Return true if given node from the given blob has
 647 * compat in its compatible list
 648 * @blob: A device tree blob
 649 * @node: node to test
 650 * @compat: compatible string to compare with compatible list.
 651 *
 652 * Return: a non-zero value on match with smaller values returned for more
 653 * specific compatible values.
 654 */
 655static int of_fdt_is_compatible(const void *blob,
 656		      unsigned long node, const char *compat)
 657{
 658	const char *cp;
 659	int cplen;
 660	unsigned long l, score = 0;
 661
 662	cp = fdt_getprop(blob, node, "compatible", &cplen);
 663	if (cp == NULL)
 664		return 0;
 665	while (cplen > 0) {
 666		score++;
 667		if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
 668			return score;
 669		l = strlen(cp) + 1;
 670		cp += l;
 671		cplen -= l;
 672	}
 673
 674	return 0;
 675}
 676
 677/**
 678 * of_flat_dt_is_compatible - Return true if given node has compat in compatible list
 679 * @node: node to test
 680 * @compat: compatible string to compare with compatible list.
 681 */
 682int __init of_flat_dt_is_compatible(unsigned long node, const char *compat)
 683{
 684	return of_fdt_is_compatible(initial_boot_params, node, compat);
 685}
 686
 687/*
 688 * of_flat_dt_match - Return true if node matches a list of compatible values
 689 */
 690static int __init of_flat_dt_match(unsigned long node, const char *const *compat)
 691{
 692	unsigned int tmp, score = 0;
 693
 694	if (!compat)
 695		return 0;
 696
 697	while (*compat) {
 698		tmp = of_fdt_is_compatible(initial_boot_params, node, *compat);
 699		if (tmp && (score == 0 || (tmp < score)))
 700			score = tmp;
 701		compat++;
 702	}
 703
 704	return score;
 705}
 706
 707/*
 708 * of_get_flat_dt_phandle - Given a node in the flat blob, return the phandle
 709 */
 710uint32_t __init of_get_flat_dt_phandle(unsigned long node)
 711{
 712	return fdt_get_phandle(initial_boot_params, node);
 713}
 714
 715const char * __init of_flat_dt_get_machine_name(void)
 716{
 717	const char *name;
 718	unsigned long dt_root = of_get_flat_dt_root();
 719
 720	name = of_get_flat_dt_prop(dt_root, "model", NULL);
 721	if (!name)
 722		name = of_get_flat_dt_prop(dt_root, "compatible", NULL);
 723	return name;
 724}
 725
 726/**
 727 * of_flat_dt_match_machine - Iterate match tables to find matching machine.
 728 *
 729 * @default_match: A machine specific ptr to return in case of no match.
 730 * @get_next_compat: callback function to return next compatible match table.
 731 *
 732 * Iterate through machine match tables to find the best match for the machine
 733 * compatible string in the FDT.
 734 */
 735const void * __init of_flat_dt_match_machine(const void *default_match,
 736		const void * (*get_next_compat)(const char * const**))
 737{
 738	const void *data = NULL;
 739	const void *best_data = default_match;
 740	const char *const *compat;
 741	unsigned long dt_root;
 742	unsigned int best_score = ~1, score = 0;
 743
 744	dt_root = of_get_flat_dt_root();
 745	while ((data = get_next_compat(&compat))) {
 746		score = of_flat_dt_match(dt_root, compat);
 747		if (score > 0 && score < best_score) {
 748			best_data = data;
 749			best_score = score;
 750		}
 751	}
 752	if (!best_data) {
 753		const char *prop;
 754		int size;
 755
 756		pr_err("\n unrecognized device tree list:\n[ ");
 757
 758		prop = of_get_flat_dt_prop(dt_root, "compatible", &size);
 759		if (prop) {
 760			while (size > 0) {
 761				printk("'%s' ", prop);
 762				size -= strlen(prop) + 1;
 763				prop += strlen(prop) + 1;
 764			}
 765		}
 766		printk("]\n\n");
 767		return NULL;
 768	}
 769
 770	pr_info("Machine model: %s\n", of_flat_dt_get_machine_name());
 771
 772	return best_data;
 773}
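
/*
 * Illustrative sketch, not part of this file, of how architecture setup code
 * can drive of_flat_dt_match_machine(): get_next_compat() returns one
 * machine-specific pointer per call and fills in its NULL-terminated
 * compatible list, returning NULL once the tables are exhausted.
 * struct example_machine, example_machines[] and example_get_next() are
 * hypothetical names.
 *
 *	struct example_machine {
 *		const char *const *compat;
 *		const char *name;
 *	};
 *
 *	static const char *const board_compat[] = { "vendor,board", NULL };
 *
 *	static const struct example_machine example_machines[] = {
 *		{ .compat = board_compat, .name = "Example board" },
 *	};
 *
 *	static const void * __init example_get_next(const char *const **match)
 *	{
 *		static int i;
 *
 *		if (i >= ARRAY_SIZE(example_machines))
 *			return NULL;
 *		*match = example_machines[i].compat;
 *		return &example_machines[i++];
 *	}
 *
 *	const struct example_machine *m =
 *		of_flat_dt_match_machine(NULL, example_get_next);
 */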
 774
 775static void __early_init_dt_declare_initrd(unsigned long start,
 776					   unsigned long end)
 777{
 778	/*
 779	 * __va() is not yet available this early on some platforms. In that
 780	 * case, the platform uses phys_initrd_start/phys_initrd_size instead
 781	 * and does the VA conversion itself.
 782	 */
 783	if (!IS_ENABLED(CONFIG_ARM64) &&
 784	    !(IS_ENABLED(CONFIG_RISCV) && IS_ENABLED(CONFIG_64BIT))) {
 785		initrd_start = (unsigned long)__va(start);
 786		initrd_end = (unsigned long)__va(end);
 787		initrd_below_start_ok = 1;
 788	}
 789}
 790
 791/**
 792 * early_init_dt_check_for_initrd - Decode initrd location from flat tree
 793 * @node: reference to node containing initrd location ('chosen')
 794 */
 795static void __init early_init_dt_check_for_initrd(unsigned long node)
 796{
 797	u64 start, end;
 798	int len;
 799	const __be32 *prop;
 800
 801	if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD))
 802		return;
 803
 804	pr_debug("Looking for initrd properties... ");
 805
 806	prop = of_get_flat_dt_prop(node, "linux,initrd-start", &len);
 807	if (!prop)
 808		return;
 809	start = of_read_number(prop, len/4);
 810
 811	prop = of_get_flat_dt_prop(node, "linux,initrd-end", &len);
 812	if (!prop)
 813		return;
 814	end = of_read_number(prop, len/4);
 815	if (start > end)
 816		return;
 817
 818	__early_init_dt_declare_initrd(start, end);
 819	phys_initrd_start = start;
 820	phys_initrd_size = end - start;
 821
 822	pr_debug("initrd_start=0x%llx  initrd_end=0x%llx\n", start, end);
 823}
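
/*
 * Illustrative /chosen fragment with hypothetical values, as decoded by the
 * function above (each property may be one or two cells wide):
 *
 *	chosen {
 *		linux,initrd-start = <0x48000000>;
 *		linux,initrd-end = <0x48800000>;
 *	};
 *
 * which yields phys_initrd_start = 0x48000000 and
 * phys_initrd_size = 0x00800000.
 */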
 824
 825/**
 826 * early_init_dt_check_for_elfcorehdr - Decode elfcorehdr location from flat
 827 * tree
 828 * @node: reference to node containing elfcorehdr location ('chosen')
 829 */
 830static void __init early_init_dt_check_for_elfcorehdr(unsigned long node)
 831{
 832	const __be32 *prop;
 833	int len;
 834
 835	if (!IS_ENABLED(CONFIG_CRASH_DUMP))
 836		return;
 837
 838	pr_debug("Looking for elfcorehdr property... ");
 839
 840	prop = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
 841	if (!prop || (len < (dt_root_addr_cells + dt_root_size_cells)))
 842		return;
 843
 844	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &prop);
 845	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &prop);
 846
 847	pr_debug("elfcorehdr_start=0x%llx elfcorehdr_size=0x%llx\n",
 848		 elfcorehdr_addr, elfcorehdr_size);
 849}
 850
 851static unsigned long chosen_node_offset = -FDT_ERR_NOTFOUND;
 852
 853/*
 854 * The main use of linux,usable-memory-range is for a crash dump kernel.
 855 * Originally, there was only one usable-memory region. Now there may be
 856 * two regions, a low region and a high region.
 857 * To preserve compatibility with existing user space and older kdump, the low
 858 * region is always the last range of linux,usable-memory-range, if it exists.
 859 */
 860#define MAX_USABLE_RANGES		2
 861
 862/**
 863 * early_init_dt_check_for_usable_mem_range - Decode usable memory range
 864 * location from flat tree
 865 */
 866void __init early_init_dt_check_for_usable_mem_range(void)
 867{
 868	struct memblock_region rgn[MAX_USABLE_RANGES] = {0};
 869	const __be32 *prop, *endp;
 870	int len, i;
 871	unsigned long node = chosen_node_offset;
 872
 873	if ((long)node < 0)
 874		return;
 875
 876	pr_debug("Looking for usable-memory-range property... ");
 877
 878	prop = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
 879	if (!prop || (len % (dt_root_addr_cells + dt_root_size_cells)))
 880		return;
 881
 882	endp = prop + (len / sizeof(__be32));
 883	for (i = 0; i < MAX_USABLE_RANGES && prop < endp; i++) {
 884		rgn[i].base = dt_mem_next_cell(dt_root_addr_cells, &prop);
 885		rgn[i].size = dt_mem_next_cell(dt_root_size_cells, &prop);
 886
 887		pr_debug("cap_mem_regions[%d]: base=%pa, size=%pa\n",
 888			 i, &rgn[i].base, &rgn[i].size);
 889	}
 890
 891	memblock_cap_memory_range(rgn[0].base, rgn[0].size);
 892	for (i = 1; i < MAX_USABLE_RANGES && rgn[i].size; i++)
 893		memblock_add(rgn[i].base, rgn[i].size);
 894}
 895
 896#ifdef CONFIG_SERIAL_EARLYCON
 897
 898int __init early_init_dt_scan_chosen_stdout(void)
 899{
 900	int offset;
 901	const char *p, *q, *options = NULL;
 902	int l;
 903	const struct earlycon_id *match;
 904	const void *fdt = initial_boot_params;
 905	int ret;
 906
 907	offset = fdt_path_offset(fdt, "/chosen");
 908	if (offset < 0)
 909		offset = fdt_path_offset(fdt, "/chosen@0");
 910	if (offset < 0)
 911		return -ENOENT;
 912
 913	p = fdt_getprop(fdt, offset, "stdout-path", &l);
 914	if (!p)
 915		p = fdt_getprop(fdt, offset, "linux,stdout-path", &l);
 916	if (!p || !l)
 917		return -ENOENT;
 918
 919	q = strchrnul(p, ':');
 920	if (*q != '\0')
 921		options = q + 1;
 922	l = q - p;
 923
 924	/* Get the node specified by stdout-path */
 925	offset = fdt_path_offset_namelen(fdt, p, l);
 926	if (offset < 0) {
 927		pr_warn("earlycon: stdout-path %.*s not found\n", l, p);
 928		return 0;
 929	}
 930
 931	for (match = __earlycon_table; match < __earlycon_table_end; match++) {
 932		if (!match->compatible[0])
 933			continue;
 934
 935		if (fdt_node_check_compatible(fdt, offset, match->compatible))
 936			continue;
 937
 938		ret = of_setup_earlycon(match, offset, options);
 939		if (!ret || ret == -EALREADY)
 940			return 0;
 941	}
 942	return -ENODEV;
 943}
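
/*
 * Illustrative example with hypothetical values: given
 *
 *	chosen {
 *		stdout-path = "serial0:115200n8";
 *	};
 *
 * the part before the ':' ("serial0", typically an alias) is resolved with
 * fdt_path_offset_namelen() and "115200n8" is handed to of_setup_earlycon()
 * as the options string.
 */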
 944#endif
 945
 946/*
 947 * early_init_dt_scan_root - fetch the top level address and size cells
 948 */
 949int __init early_init_dt_scan_root(void)
 950{
 951	const __be32 *prop;
 952	const void *fdt = initial_boot_params;
 953	int node = fdt_path_offset(fdt, "/");
 954
 955	if (node < 0)
 956		return -ENODEV;
 957
 958	dt_root_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
 959	dt_root_addr_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
 960
 961	prop = of_get_flat_dt_prop(node, "#size-cells", NULL);
 962	if (prop)
 963		dt_root_size_cells = be32_to_cpup(prop);
 964	pr_debug("dt_root_size_cells = %x\n", dt_root_size_cells);
 965
 966	prop = of_get_flat_dt_prop(node, "#address-cells", NULL);
 967	if (prop)
 968		dt_root_addr_cells = be32_to_cpup(prop);
 969	pr_debug("dt_root_addr_cells = %x\n", dt_root_addr_cells);
 970
 971	return 0;
 972}
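
/*
 * Illustrative root-node fragment with hypothetical values; on a typical
 * 64-bit platform:
 *
 *	/ {
 *		#address-cells = <2>;
 *		#size-cells = <2>;
 *	};
 *
 * so dt_root_addr_cells = dt_root_size_cells = 2, which is what
 * dt_mem_next_cell() consumes below.
 */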
 973
 974u64 __init dt_mem_next_cell(int s, const __be32 **cellp)
 975{
 976	const __be32 *p = *cellp;
 977
 978	*cellp = p + s;
 979	return of_read_number(p, s);
 980}
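
/*
 * Illustrative sketch with hypothetical values: parsing one (base, size)
 * pair with dt_root_addr_cells = 2 and dt_root_size_cells = 2 from
 *
 *	reg = <0x0 0x80000000  0x0 0x40000000>;
 *
 * With reg pointing at the property data:
 *
 *	u64 base = dt_mem_next_cell(dt_root_addr_cells, &reg);
 *	u64 size = dt_mem_next_cell(dt_root_size_cells, &reg);
 *
 * leaves base = 0x80000000, size = 0x40000000 and reg advanced by four
 * cells, ready for the next pair.
 */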
 981
 982/*
 983 * early_init_dt_scan_memory - Look for and parse memory nodes
 984 */
 985int __init early_init_dt_scan_memory(void)
 986{
 987	int node, found_memory = 0;
 988	const void *fdt = initial_boot_params;
 989
 990	fdt_for_each_subnode(node, fdt, 0) {
 991		const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
 992		const __be32 *reg, *endp;
 993		int l;
 994		bool hotpluggable;
 995
 996		/* We are scanning "memory" nodes only */
 997		if (type == NULL || strcmp(type, "memory") != 0)
 998			continue;
 999
1000		if (!of_fdt_device_is_available(fdt, node))
1001			continue;
1002
1003		reg = of_get_flat_dt_prop(node, "linux,usable-memory", &l);
1004		if (reg == NULL)
1005			reg = of_get_flat_dt_prop(node, "reg", &l);
1006		if (reg == NULL)
1007			continue;
1008
1009		endp = reg + (l / sizeof(__be32));
1010		hotpluggable = of_get_flat_dt_prop(node, "hotpluggable", NULL);
1011
1012		pr_debug("memory scan node %s, reg size %d,\n",
1013			 fdt_get_name(fdt, node, NULL), l);
1014
1015		while ((endp - reg) >= (dt_root_addr_cells + dt_root_size_cells)) {
1016			u64 base, size;
1017
1018			base = dt_mem_next_cell(dt_root_addr_cells, &reg);
1019			size = dt_mem_next_cell(dt_root_size_cells, &reg);
1020
1021			if (size == 0)
1022				continue;
1023			pr_debug(" - %llx, %llx\n", base, size);
1024
1025			early_init_dt_add_memory_arch(base, size);
1026
1027			found_memory = 1;
1028
1029			if (!hotpluggable)
1030				continue;
1031
1032			if (memblock_mark_hotplug(base, size))
1033				pr_warn("failed to mark hotplug range 0x%llx - 0x%llx\n",
1034					base, base + size);
1035		}
1036	}
1037	return found_memory;
1038}
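
/*
 * Illustrative memory node with hypothetical values, as consumed by the scan
 * above; the bare "hotpluggable" property makes the range eligible for
 * memblock_mark_hotplug():
 *
 *	memory@100000000 {
 *		device_type = "memory";
 *		reg = <0x1 0x00000000  0x0 0x80000000>;
 *		hotpluggable;
 *	};
 */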
1039
1040int __init early_init_dt_scan_chosen(char *cmdline)
1041{
1042	int l, node;
1043	const char *p;
1044	const void *rng_seed;
1045	const void *fdt = initial_boot_params;
1046
1047	node = fdt_path_offset(fdt, "/chosen");
1048	if (node < 0)
1049		node = fdt_path_offset(fdt, "/chosen@0");
1050	if (node < 0)
1051		/* Handle the cmdline config options even if no /chosen node */
1052		goto handle_cmdline;
1053
1054	chosen_node_offset = node;
1055
1056	early_init_dt_check_for_initrd(node);
1057	early_init_dt_check_for_elfcorehdr(node);
1058
1059	rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l);
1060	if (rng_seed && l > 0) {
1061		add_bootloader_randomness(rng_seed, l);
1062
1063		/* try to clear seed so it won't be found. */
1064		fdt_nop_property(initial_boot_params, node, "rng-seed");
1065
1066		/* update CRC check value */
1067		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1068				fdt_totalsize(initial_boot_params));
1069	}
1070
1071	/* Retrieve command line */
1072	p = of_get_flat_dt_prop(node, "bootargs", &l);
1073	if (p != NULL && l > 0)
1074		strscpy(cmdline, p, min(l, COMMAND_LINE_SIZE));
1075
1076handle_cmdline:
1077	/*
1078	 * CONFIG_CMDLINE is meant to be a default in case nothing else
1079	 * managed to set the command line, unless CONFIG_CMDLINE_FORCE
1080	 * is set, in which case we override whatever was found earlier.
1081	 */
1082#ifdef CONFIG_CMDLINE
1083#if defined(CONFIG_CMDLINE_EXTEND)
1084	strlcat(cmdline, " ", COMMAND_LINE_SIZE);
1085	strlcat(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1086#elif defined(CONFIG_CMDLINE_FORCE)
1087	strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1088#else
1089	/* No arguments from boot loader, use kernel's cmdline */
1090	if (!((char *)cmdline)[0])
1091		strscpy(cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
1092#endif
1093#endif /* CONFIG_CMDLINE */
1094
1095	pr_debug("Command line is: %s\n", (char *)cmdline);
1096
1097	return 0;
1098}
1099
1100#ifndef MIN_MEMBLOCK_ADDR
1101#define MIN_MEMBLOCK_ADDR	__pa(PAGE_OFFSET)
1102#endif
1103#ifndef MAX_MEMBLOCK_ADDR
1104#define MAX_MEMBLOCK_ADDR	((phys_addr_t)~0)
1105#endif
1106
1107void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
1108{
1109	const u64 phys_offset = MIN_MEMBLOCK_ADDR;
1110
1111	if (size < PAGE_SIZE - (base & ~PAGE_MASK)) {
1112		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1113			base, base + size);
1114		return;
1115	}
1116
1117	if (!PAGE_ALIGNED(base)) {
1118		size -= PAGE_SIZE - (base & ~PAGE_MASK);
1119		base = PAGE_ALIGN(base);
1120	}
1121	size &= PAGE_MASK;
1122
1123	if (base > MAX_MEMBLOCK_ADDR) {
1124		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1125			base, base + size);
1126		return;
1127	}
1128
1129	if (base + size - 1 > MAX_MEMBLOCK_ADDR) {
1130		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1131			((u64)MAX_MEMBLOCK_ADDR) + 1, base + size);
1132		size = MAX_MEMBLOCK_ADDR - base + 1;
1133	}
1134
1135	if (base + size < phys_offset) {
1136		pr_warn("Ignoring memory block 0x%llx - 0x%llx\n",
1137			base, base + size);
1138		return;
1139	}
1140	if (base < phys_offset) {
1141		pr_warn("Ignoring memory range 0x%llx - 0x%llx\n",
1142			base, phys_offset);
1143		size -= phys_offset - base;
1144		base = phys_offset;
1145	}
1146	memblock_add(base, size);
1147}
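
/*
 * Illustrative example with hypothetical values: with MIN_MEMBLOCK_ADDR
 * (phys_offset) at 0x80000000, a DT range of base = 0x7ff00000,
 * size = 0x200000 is trimmed by the code above to drop the part below
 * phys_offset, so memblock_add(0x80000000, 0x100000) is what gets
 * registered.
 */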
1148
1149static void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
1150{
1151	void *ptr = memblock_alloc(size, align);
1152
1153	if (!ptr)
1154		panic("%s: Failed to allocate %llu bytes align=0x%llx\n",
1155		      __func__, size, align);
1156
1157	return ptr;
1158}
1159
1160bool __init early_init_dt_verify(void *params)
1161{
1162	if (!params)
1163		return false;
1164
1165	/* check device tree validity */
1166	if (fdt_check_header(params))
1167		return false;
1168
1169	/* Setup flat device-tree pointer */
1170	initial_boot_params = params;
1171	of_fdt_crc32 = crc32_be(~0, initial_boot_params,
1172				fdt_totalsize(initial_boot_params));
1173	return true;
1174}
1175
1176
1177void __init early_init_dt_scan_nodes(void)
1178{
1179	int rc;
1180
1181	/* Initialize {size,address}-cells info */
1182	early_init_dt_scan_root();
1183
1184	/* Retrieve various information from the /chosen node */
1185	rc = early_init_dt_scan_chosen(boot_command_line);
1186	if (rc)
1187		pr_warn("No chosen node found, continuing without\n");
1188
1189	/* Setup memory, calling early_init_dt_add_memory_arch */
1190	early_init_dt_scan_memory();
1191
1192	/* Handle linux,usable-memory-range property */
1193	early_init_dt_check_for_usable_mem_range();
1194}
1195
1196bool __init early_init_dt_scan(void *params)
1197{
1198	bool status;
1199
1200	status = early_init_dt_verify(params);
1201	if (!status)
1202		return false;
1203
1204	early_init_dt_scan_nodes();
1205	return true;
1206}
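
/*
 * Illustrative sketch, not part of this file, of the usual boot-time
 * sequence in architecture setup code, assuming dt_virt is a mapped virtual
 * address of the bootloader-provided DTB:
 *
 *	if (!early_init_dt_scan(dt_virt))
 *		pr_crit("No valid device tree found, continuing without\n");
 *
 *	unflatten_device_tree();
 *
 * with unflatten_device_tree() called once memblock allocations are
 * possible.
 */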
1207
1208static void *__init copy_device_tree(void *fdt)
1209{
1210	int size;
1211	void *dt;
1212
1213	size = fdt_totalsize(fdt);
1214	dt = early_init_dt_alloc_memory_arch(size,
1215					     roundup_pow_of_two(FDT_V17_SIZE));
1216
1217	if (dt)
1218		memcpy(dt, fdt, size);
1219
1220	return dt;
1221}
1222
1223/**
1224 * unflatten_device_tree - create tree of device_nodes from flat blob
1225 *
1226 * unflattens the device-tree passed by the firmware, creating the
1227 * tree of struct device_node. It also fills the "name" and "type"
1228 * pointers of the nodes so the normal device-tree walking functions
1229 * can be used.
1230 */
1231void __init unflatten_device_tree(void)
1232{
1233	void *fdt = initial_boot_params;
1234
1235	/* Don't use the bootloader provided DTB if ACPI is enabled */
1236	if (!acpi_disabled)
1237		fdt = NULL;
1238
1239	/*
1240	 * Populate an empty root node when ACPI is enabled or the bootloader
1241	 * doesn't provide one.
1242	 */
1243	if (!fdt) {
1244		fdt = (void *) __dtb_empty_root_begin;
1245		/* fdt_totalsize() will be used for copy size */
1246		if (fdt_totalsize(fdt) >
1247		    __dtb_empty_root_end - __dtb_empty_root_begin) {
1248			pr_err("invalid size in dtb_empty_root\n");
1249			return;
1250		}
1251		of_fdt_crc32 = crc32_be(~0, fdt, fdt_totalsize(fdt));
1252		fdt = copy_device_tree(fdt);
1253	}
1254
1255	__unflatten_device_tree(fdt, NULL, &of_root,
1256				early_init_dt_alloc_memory_arch, false);
1257
1258	/* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */
1259	of_alias_scan(early_init_dt_alloc_memory_arch);
1260
1261	unittest_unflatten_overlay_base();
1262}
1263
1264/**
1265 * unflatten_and_copy_device_tree - copy and create tree of device_nodes from flat blob
1266 *
1267 * Copies and unflattens the device-tree passed by the firmware, creating the
1268 * tree of struct device_node. It also fills the "name" and "type"
1269 * pointers of the nodes so the normal device-tree walking functions
1270 * can be used. This should only be used when the FDT memory has not been
1271 * reserved, such as when the FDT is built into the kernel init
1272 * section. If the FDT memory is already reserved, unflatten_device_tree()
1273 * should be used instead.
1274 */
1275void __init unflatten_and_copy_device_tree(void)
1276{
1277	if (initial_boot_params)
1278		initial_boot_params = copy_device_tree(initial_boot_params);
1279
1280	unflatten_device_tree();
1281}
1282
1283#ifdef CONFIG_SYSFS
1284static ssize_t of_fdt_raw_read(struct file *filp, struct kobject *kobj,
1285			       struct bin_attribute *bin_attr,
1286			       char *buf, loff_t off, size_t count)
1287{
1288	memcpy(buf, initial_boot_params + off, count);
1289	return count;
1290}
1291
1292static int __init of_fdt_raw_init(void)
1293{
1294	static struct bin_attribute of_fdt_raw_attr =
1295		__BIN_ATTR(fdt, S_IRUSR, of_fdt_raw_read, NULL, 0);
1296
1297	if (!initial_boot_params)
1298		return 0;
1299
1300	if (of_fdt_crc32 != crc32_be(~0, initial_boot_params,
1301				     fdt_totalsize(initial_boot_params))) {
1302		pr_warn("not creating '/sys/firmware/fdt': CRC check failed\n");
1303		return 0;
1304	}
1305	of_fdt_raw_attr.size = fdt_totalsize(initial_boot_params);
1306	return sysfs_create_bin_file(firmware_kobj, &of_fdt_raw_attr);
1307}
1308late_initcall(of_fdt_raw_init);
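
/*
 * Illustrative use of the exposed blob (assumes the dtc tool is available):
 * the raw FDT can be read back and decompiled from user space with
 *
 *	dtc -I dtb -O dts /sys/firmware/fdt
 */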
1309#endif
1310
1311#endif /* CONFIG_OF_EARLY_FLATTREE */