v6.2 (drivers/of/address.c)
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt)	"OF: " fmt
   3
   4#include <linux/device.h>
   5#include <linux/fwnode.h>
   6#include <linux/io.h>
   7#include <linux/ioport.h>
   8#include <linux/logic_pio.h>
   9#include <linux/module.h>
  10#include <linux/of_address.h>
  11#include <linux/pci.h>
  12#include <linux/pci_regs.h>
  13#include <linux/sizes.h>
  14#include <linux/slab.h>
  15#include <linux/string.h>
  16#include <linux/dma-direct.h> /* for bus_dma_region */
  17
  18#include "of_private.h"
  19
  20/* Max address size we deal with */
  21#define OF_MAX_ADDR_CELLS	4
  22#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
  23#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
  24
  25static struct of_bus *of_match_bus(struct device_node *np);
  26static int __of_address_to_resource(struct device_node *dev, int index,
  27		int bar_no, struct resource *r);
  28static bool of_mmio_is_nonposted(struct device_node *np);
  29
  30/* Debug utility */
  31#ifdef DEBUG
  32static void of_dump_addr(const char *s, const __be32 *addr, int na)
  33{
  34	pr_debug("%s", s);
  35	while (na--)
  36		pr_cont(" %08x", be32_to_cpu(*(addr++)));
  37	pr_cont("\n");
  38}
  39#else
  40static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
  41#endif
  42
  43/* Callbacks for bus specific translators */
  44struct of_bus {
  45	const char	*name;
  46	const char	*addresses;
  47	int		(*match)(struct device_node *parent);
  48	void		(*count_cells)(struct device_node *child,
  49				       int *addrc, int *sizec);
  50	u64		(*map)(__be32 *addr, const __be32 *range,
  51				int na, int ns, int pna);
  52	int		(*translate)(__be32 *addr, u64 offset, int na);
  53	bool	has_flags;
  54	unsigned int	(*get_flags)(const __be32 *addr);
  55};
  56
  57/*
  58 * Default translator (generic bus)
  59 */
  60
  61static void of_bus_default_count_cells(struct device_node *dev,
  62				       int *addrc, int *sizec)
  63{
  64	if (addrc)
  65		*addrc = of_n_addr_cells(dev);
  66	if (sizec)
  67		*sizec = of_n_size_cells(dev);
  68}
  69
  70static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
  71		int na, int ns, int pna)
  72{
  73	u64 cp, s, da;
  74
  75	cp = of_read_number(range, na);
  76	s  = of_read_number(range + na + pna, ns);
  77	da = of_read_number(addr, na);
  78
  79	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
  80
  81	if (da < cp || da >= (cp + s))
  82		return OF_BAD_ADDR;
  83	return da - cp;
  84}
  85
  86static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
  87{
  88	u64 a = of_read_number(addr, na);
  89	memset(addr, 0, na * 4);
  90	a += offset;
  91	if (na > 1)
  92		addr[na - 2] = cpu_to_be32(a >> 32);
  93	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);
  94
  95	return 0;
  96}
  97
  98static unsigned int of_bus_default_get_flags(const __be32 *addr)
  99{
 100	return IORESOURCE_MEM;
 101}
 102
 103#ifdef CONFIG_PCI
 104static unsigned int of_bus_pci_get_flags(const __be32 *addr)
 105{
 106	unsigned int flags = 0;
 107	u32 w = be32_to_cpup(addr);
 108
 109	if (!IS_ENABLED(CONFIG_PCI))
 110		return 0;
 111
 112	switch((w >> 24) & 0x03) {
 113	case 0x01:
 114		flags |= IORESOURCE_IO;
 115		break;
 116	case 0x02: /* 32 bits */
 117		flags |= IORESOURCE_MEM;
 118		break;
 119
 120	case 0x03: /* 64 bits */
 121		flags |= IORESOURCE_MEM | IORESOURCE_MEM_64;
 122		break;
 123	}
 124	if (w & 0x40000000)
 125		flags |= IORESOURCE_PREFETCH;
 126	return flags;
 127}
 128
 129/*
 130 * PCI bus specific translator
 131 */
 132
 133static bool of_node_is_pcie(struct device_node *np)
 134{
 135	bool is_pcie = of_node_name_eq(np, "pcie");
 136
 137	if (is_pcie)
 138		pr_warn_once("%pOF: Missing device_type\n", np);
 139
 140	return is_pcie;
 141}
 142
 143static int of_bus_pci_match(struct device_node *np)
 144{
 145	/*
 146 	 * "pciex" is PCI Express
 147	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
 148	 * "ht" is hypertransport
 149	 *
  150	 * If none of the device_type values match, and the node name is
 151	 * "pcie", accept the device as PCI (with a warning).
 152	 */
 153	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
 154		of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
 155		of_node_is_pcie(np);
 156}
 157
 158static void of_bus_pci_count_cells(struct device_node *np,
 159				   int *addrc, int *sizec)
 160{
 161	if (addrc)
 162		*addrc = 3;
 163	if (sizec)
 164		*sizec = 2;
 165}
 166
 167static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
 168		int pna)
 169{
 170	u64 cp, s, da;
 171	unsigned int af, rf;
 172
 173	af = of_bus_pci_get_flags(addr);
 174	rf = of_bus_pci_get_flags(range);
 175
 176	/* Check address type match */
 177	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
 178		return OF_BAD_ADDR;
 179
 180	/* Read address values, skipping high cell */
 181	cp = of_read_number(range + 1, na - 1);
 182	s  = of_read_number(range + na + pna, ns);
 183	da = of_read_number(addr + 1, na - 1);
 184
 185	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
 186
 187	if (da < cp || da >= (cp + s))
 188		return OF_BAD_ADDR;
 189	return da - cp;
 190}
 191
 192static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
 193{
 194	return of_bus_default_translate(addr + 1, offset, na - 1);
 195}
 196#endif /* CONFIG_PCI */
 197
 198int of_pci_address_to_resource(struct device_node *dev, int bar,
 199			       struct resource *r)
 200{
 201
 202	if (!IS_ENABLED(CONFIG_PCI))
 203		return -ENOSYS;
 204
 205	return __of_address_to_resource(dev, -1, bar, r);
 206}
 207EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
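Editorial usage sketch (not part of the kernel source; the helper name example_get_bar0 is hypothetical): resolve BAR 0 of a DT-described PCI device into a struct resource using the function exported above.

static int example_get_bar0(struct device_node *np)
{
	struct resource res;
	int ret;

	/* BAR numbers 0..5 select the matching "assigned-addresses" entry */
	ret = of_pci_address_to_resource(np, 0, &res);
	if (ret)
		return ret;

	pr_info("BAR0: %pR\n", &res);
	return 0;
}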
 208
 209/*
 210 * of_pci_range_to_resource - Create a resource from an of_pci_range
 211 * @range:	the PCI range that describes the resource
 212 * @np:		device node where the range belongs to
 213 * @res:	pointer to a valid resource that will be updated to
 214 *              reflect the values contained in the range.
 215 *
  216 * Returns -EINVAL if the range cannot be converted to a resource.
 217 *
 218 * Note that if the range is an IO range, the resource will be converted
 219 * using pci_address_to_pio() which can fail if it is called too early or
 220 * if the range cannot be matched to any host bridge IO space (our case here).
 221 * To guard against that we try to register the IO range first.
  222 * If that fails we know that pci_address_to_pio() will fail too.
 223 */
 224int of_pci_range_to_resource(struct of_pci_range *range,
 225			     struct device_node *np, struct resource *res)
 226{
 227	int err;
 228	res->flags = range->flags;
 229	res->parent = res->child = res->sibling = NULL;
 230	res->name = np->full_name;
 231
 232	if (!IS_ENABLED(CONFIG_PCI))
 233		return -ENOSYS;
 234
 235	if (res->flags & IORESOURCE_IO) {
 236		unsigned long port;
 237		err = pci_register_io_range(&np->fwnode, range->cpu_addr,
 238				range->size);
 239		if (err)
 240			goto invalid_range;
 241		port = pci_address_to_pio(range->cpu_addr);
 242		if (port == (unsigned long)-1) {
 243			err = -EINVAL;
 244			goto invalid_range;
 245		}
 246		res->start = port;
 247	} else {
 248		if ((sizeof(resource_size_t) < 8) &&
 249		    upper_32_bits(range->cpu_addr)) {
 250			err = -EINVAL;
 251			goto invalid_range;
 252		}
 253
 254		res->start = range->cpu_addr;
 255	}
 256	res->end = res->start + range->size - 1;
 257	return 0;
 258
 259invalid_range:
 260	res->start = (resource_size_t)OF_BAD_ADDR;
 261	res->end = (resource_size_t)OF_BAD_ADDR;
 262	return err;
 263}
 264EXPORT_SYMBOL(of_pci_range_to_resource);
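A hedged sketch of how a PCI host-bridge driver might combine the "ranges" parser defined later in this file with of_pci_range_to_resource() to enumerate its bridge windows (the function name is illustrative and error handling is simplified):

static int example_parse_bridge_windows(struct device_node *np)
{
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	int err;

	err = of_pci_range_parser_init(&parser, np);
	if (err)
		return err;

	for_each_of_pci_range(&parser, &range) {
		/* Skip entries that cannot be translated or registered */
		if (of_pci_range_to_resource(&range, np, &res))
			continue;
		pr_info("bridge window: %pR\n", &res);
	}
	return 0;
}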
 265
 266/*
 267 * ISA bus specific translator
 268 */
 269
 270static int of_bus_isa_match(struct device_node *np)
 271{
 272	return of_node_name_eq(np, "isa");
 273}
 274
 275static void of_bus_isa_count_cells(struct device_node *child,
 276				   int *addrc, int *sizec)
 277{
 278	if (addrc)
 279		*addrc = 2;
 280	if (sizec)
 281		*sizec = 1;
 282}
 283
 284static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
 285		int pna)
 286{
 287	u64 cp, s, da;
 288
 289	/* Check address type match */
 290	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
 291		return OF_BAD_ADDR;
 292
 293	/* Read address values, skipping high cell */
 294	cp = of_read_number(range + 1, na - 1);
 295	s  = of_read_number(range + na + pna, ns);
 296	da = of_read_number(addr + 1, na - 1);
 297
 298	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n", cp, s, da);
 299
 300	if (da < cp || da >= (cp + s))
 301		return OF_BAD_ADDR;
 302	return da - cp;
 303}
 304
 305static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
 306{
 307	return of_bus_default_translate(addr + 1, offset, na - 1);
 308}
 309
 310static unsigned int of_bus_isa_get_flags(const __be32 *addr)
 311{
 312	unsigned int flags = 0;
 313	u32 w = be32_to_cpup(addr);
 314
 315	if (w & 1)
 316		flags |= IORESOURCE_IO;
 317	else
 318		flags |= IORESOURCE_MEM;
 319	return flags;
 320}
 321
 322/*
 323 * Array of bus specific translators
 324 */
 325
 326static struct of_bus of_busses[] = {
 327#ifdef CONFIG_PCI
 328	/* PCI */
 329	{
 330		.name = "pci",
 331		.addresses = "assigned-addresses",
 332		.match = of_bus_pci_match,
 333		.count_cells = of_bus_pci_count_cells,
 334		.map = of_bus_pci_map,
 335		.translate = of_bus_pci_translate,
 336		.has_flags = true,
 337		.get_flags = of_bus_pci_get_flags,
 338	},
 339#endif /* CONFIG_PCI */
 340	/* ISA */
 341	{
 342		.name = "isa",
 343		.addresses = "reg",
 344		.match = of_bus_isa_match,
 345		.count_cells = of_bus_isa_count_cells,
 346		.map = of_bus_isa_map,
 347		.translate = of_bus_isa_translate,
 348		.has_flags = true,
 349		.get_flags = of_bus_isa_get_flags,
 350	},
 351	/* Default */
 352	{
 353		.name = "default",
 354		.addresses = "reg",
 355		.match = NULL,
 356		.count_cells = of_bus_default_count_cells,
 357		.map = of_bus_default_map,
 358		.translate = of_bus_default_translate,
 359		.get_flags = of_bus_default_get_flags,
 360	},
 361};
 362
 363static struct of_bus *of_match_bus(struct device_node *np)
 364{
 365	int i;
 366
 367	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
 368		if (!of_busses[i].match || of_busses[i].match(np))
 369			return &of_busses[i];
 370	BUG();
 371	return NULL;
 372}
 373
 374static int of_empty_ranges_quirk(struct device_node *np)
 375{
 376	if (IS_ENABLED(CONFIG_PPC)) {
 377		/* To save cycles, we cache the result for global "Mac" setting */
 378		static int quirk_state = -1;
 379
 380		/* PA-SEMI sdc DT bug */
 381		if (of_device_is_compatible(np, "1682m-sdc"))
 382			return true;
 383
 384		/* Make quirk cached */
 385		if (quirk_state < 0)
 386			quirk_state =
 387				of_machine_is_compatible("Power Macintosh") ||
 388				of_machine_is_compatible("MacRISC");
 389		return quirk_state;
 390	}
 391	return false;
 392}
 393
 394static int of_translate_one(struct device_node *parent, struct of_bus *bus,
 395			    struct of_bus *pbus, __be32 *addr,
 396			    int na, int ns, int pna, const char *rprop)
 397{
 398	const __be32 *ranges;
 399	unsigned int rlen;
 400	int rone;
 401	u64 offset = OF_BAD_ADDR;
 402
 403	/*
 404	 * Normally, an absence of a "ranges" property means we are
 405	 * crossing a non-translatable boundary, and thus the addresses
 406	 * below the current cannot be converted to CPU physical ones.
 407	 * Unfortunately, while this is very clear in the spec, it's not
 408	 * what Apple understood, and they do have things like /uni-n or
 409	 * /ht nodes with no "ranges" property and a lot of perfectly
  410	 * usable mapped devices below them. Thus we treat the absence of
 411	 * "ranges" as equivalent to an empty "ranges" property which means
 412	 * a 1:1 translation at that level. It's up to the caller not to try
 413	 * to translate addresses that aren't supposed to be translated in
 414	 * the first place. --BenH.
 415	 *
 416	 * As far as we know, this damage only exists on Apple machines, so
  417	 * this code is only enabled on powerpc. --gcl
 418	 *
 419	 * This quirk also applies for 'dma-ranges' which frequently exist in
 420	 * child nodes without 'dma-ranges' in the parent nodes. --RobH
 421	 */
 422	ranges = of_get_property(parent, rprop, &rlen);
 423	if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
 424	    strcmp(rprop, "dma-ranges")) {
 425		pr_debug("no ranges; cannot translate\n");
 426		return 1;
 427	}
 428	if (ranges == NULL || rlen == 0) {
 429		offset = of_read_number(addr, na);
 430		memset(addr, 0, pna * 4);
 431		pr_debug("empty ranges; 1:1 translation\n");
 432		goto finish;
 433	}
 434
 435	pr_debug("walking ranges...\n");
 436
 437	/* Now walk through the ranges */
 438	rlen /= 4;
 439	rone = na + pna + ns;
 440	for (; rlen >= rone; rlen -= rone, ranges += rone) {
 441		offset = bus->map(addr, ranges, na, ns, pna);
 442		if (offset != OF_BAD_ADDR)
 443			break;
 444	}
 445	if (offset == OF_BAD_ADDR) {
 446		pr_debug("not found !\n");
 447		return 1;
 448	}
 449	memcpy(addr, ranges + na, 4 * pna);
 450
 451 finish:
 452	of_dump_addr("parent translation for:", addr, pna);
 453	pr_debug("with offset: %llx\n", offset);
 454
 455	/* Translate it into parent bus space */
 456	return pbus->translate(addr, offset, pna);
 457}
 458
 459/*
 460 * Translate an address from the device-tree into a CPU physical address,
 461 * this walks up the tree and applies the various bus mappings on the
 462 * way.
 463 *
  464 * Note: We consider crossing any level with #size-cells == 0 to mean
 465 * that translation is impossible (that is we are not dealing with a value
 466 * that can be mapped to a cpu physical address). This is not really specified
  467 * that way, but it is traditionally the way IBM, at least, does things.
 468 *
 469 * Whenever the translation fails, the *host pointer will be set to the
  470 * device that registered the logical PIO mapping, and the return code is
 471 * relative to that node.
 472 */
 473static u64 __of_translate_address(struct device_node *dev,
 474				  struct device_node *(*get_parent)(const struct device_node *),
 475				  const __be32 *in_addr, const char *rprop,
 476				  struct device_node **host)
 477{
 478	struct device_node *parent = NULL;
 479	struct of_bus *bus, *pbus;
 480	__be32 addr[OF_MAX_ADDR_CELLS];
 481	int na, ns, pna, pns;
 482	u64 result = OF_BAD_ADDR;
 483
 484	pr_debug("** translation for device %pOF **\n", dev);
 485
 486	/* Increase refcount at current level */
 487	of_node_get(dev);
 488
 489	*host = NULL;
 490	/* Get parent & match bus type */
 491	parent = get_parent(dev);
 492	if (parent == NULL)
 493		goto bail;
 494	bus = of_match_bus(parent);
 495
 496	/* Count address cells & copy address locally */
 497	bus->count_cells(dev, &na, &ns);
 498	if (!OF_CHECK_COUNTS(na, ns)) {
 499		pr_debug("Bad cell count for %pOF\n", dev);
 500		goto bail;
 501	}
 502	memcpy(addr, in_addr, na * 4);
 503
 504	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
 505	    bus->name, na, ns, parent);
 506	of_dump_addr("translating address:", addr, na);
 507
 508	/* Translate */
 509	for (;;) {
 510		struct logic_pio_hwaddr *iorange;
 511
 512		/* Switch to parent bus */
 513		of_node_put(dev);
 514		dev = parent;
 515		parent = get_parent(dev);
 516
 517		/* If root, we have finished */
 518		if (parent == NULL) {
 519			pr_debug("reached root node\n");
 520			result = of_read_number(addr, na);
 521			break;
 522		}
 523
 524		/*
 525		 * For indirectIO device which has no ranges property, get
 526		 * the address from reg directly.
 527		 */
 528		iorange = find_io_range_by_fwnode(&dev->fwnode);
 529		if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
 530			result = of_read_number(addr + 1, na - 1);
 531			pr_debug("indirectIO matched(%pOF) 0x%llx\n",
 532				 dev, result);
 533			*host = of_node_get(dev);
 534			break;
 535		}
 536
 537		/* Get new parent bus and counts */
 538		pbus = of_match_bus(parent);
 539		pbus->count_cells(dev, &pna, &pns);
 540		if (!OF_CHECK_COUNTS(pna, pns)) {
 541			pr_err("Bad cell count for %pOF\n", dev);
 542			break;
 543		}
 544
 545		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
 546		    pbus->name, pna, pns, parent);
 547
 548		/* Apply bus translation */
 549		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
 550			break;
 551
 552		/* Complete the move up one level */
 553		na = pna;
 554		ns = pns;
 555		bus = pbus;
 556
 557		of_dump_addr("one level translation:", addr, na);
 558	}
 559 bail:
 560	of_node_put(parent);
 561	of_node_put(dev);
 562
 563	return result;
 564}
 565
 566u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
 567{
 568	struct device_node *host;
 569	u64 ret;
 570
 571	ret = __of_translate_address(dev, of_get_parent,
 572				     in_addr, "ranges", &host);
 573	if (host) {
 574		of_node_put(host);
 575		return OF_BAD_ADDR;
 576	}
 577
 578	return ret;
 579}
 580EXPORT_SYMBOL(of_translate_address);
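A small editorial sketch of the typical caller pattern (the helper name is made up; most users go through of_address_to_resource() instead): translate the first "reg" cell group of a node into a CPU physical address.

static u64 example_first_reg_phys(struct device_node *np)
{
	const __be32 *reg = of_get_property(np, "reg", NULL);

	if (!reg)
		return OF_BAD_ADDR;

	/* "reg" starts with the address cells expected by the parent bus */
	return of_translate_address(np, reg);
}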
 581
 582#ifdef CONFIG_HAS_DMA
 583struct device_node *__of_get_dma_parent(const struct device_node *np)
 584{
 585	struct of_phandle_args args;
 586	int ret, index;
 587
 588	index = of_property_match_string(np, "interconnect-names", "dma-mem");
 589	if (index < 0)
 590		return of_get_parent(np);
 591
 592	ret = of_parse_phandle_with_args(np, "interconnects",
 593					 "#interconnect-cells",
 594					 index, &args);
 595	if (ret < 0)
 596		return of_get_parent(np);
 597
 598	return of_node_get(args.np);
 599}
 600#endif
 601
 602static struct device_node *of_get_next_dma_parent(struct device_node *np)
 603{
 604	struct device_node *parent;
 605
 606	parent = __of_get_dma_parent(np);
 607	of_node_put(np);
 608
 609	return parent;
 610}
 611
 612u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
 613{
 614	struct device_node *host;
 615	u64 ret;
 616
 617	ret = __of_translate_address(dev, __of_get_dma_parent,
 618				     in_addr, "dma-ranges", &host);
 619
 620	if (host) {
 621		of_node_put(host);
 622		return OF_BAD_ADDR;
 623	}
 624
 625	return ret;
 626}
 627EXPORT_SYMBOL(of_translate_dma_address);
 628
 629const __be32 *__of_get_address(struct device_node *dev, int index, int bar_no,
 630			       u64 *size, unsigned int *flags)
 631{
 632	const __be32 *prop;
 633	unsigned int psize;
 634	struct device_node *parent;
 635	struct of_bus *bus;
 636	int onesize, i, na, ns;
 637
 638	/* Get parent & match bus type */
 639	parent = of_get_parent(dev);
 640	if (parent == NULL)
 641		return NULL;
 642	bus = of_match_bus(parent);
 643	if (strcmp(bus->name, "pci") && (bar_no >= 0)) {
 644		of_node_put(parent);
 645		return NULL;
 646	}
 647	bus->count_cells(dev, &na, &ns);
 648	of_node_put(parent);
 649	if (!OF_CHECK_ADDR_COUNT(na))
 650		return NULL;
 651
 652	/* Get "reg" or "assigned-addresses" property */
 653	prop = of_get_property(dev, bus->addresses, &psize);
 654	if (prop == NULL)
 655		return NULL;
 656	psize /= 4;
 657
 658	onesize = na + ns;
 659	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
 660		u32 val = be32_to_cpu(prop[0]);
 661		/* PCI bus matches on BAR number instead of index */
 662		if (((bar_no >= 0) && ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0))) ||
 663		    ((index >= 0) && (i == index))) {
 664			if (size)
 665				*size = of_read_number(prop + na, ns);
 666			if (flags)
 667				*flags = bus->get_flags(prop);
 668			return prop;
 669		}
 670	}
 671	return NULL;
 672}
 673EXPORT_SYMBOL(__of_get_address);
 674
 675static int parser_init(struct of_pci_range_parser *parser,
 676			struct device_node *node, const char *name)
 677{
 678	int rlen;
 679
 680	parser->node = node;
 681	parser->pna = of_n_addr_cells(node);
 682	parser->na = of_bus_n_addr_cells(node);
 683	parser->ns = of_bus_n_size_cells(node);
 684	parser->dma = !strcmp(name, "dma-ranges");
 685	parser->bus = of_match_bus(node);
 686
 687	parser->range = of_get_property(node, name, &rlen);
 688	if (parser->range == NULL)
 689		return -ENOENT;
 690
 691	parser->end = parser->range + rlen / sizeof(__be32);
 692
 693	return 0;
 694}
 695
 696int of_pci_range_parser_init(struct of_pci_range_parser *parser,
 697				struct device_node *node)
 698{
 699	return parser_init(parser, node, "ranges");
 700}
 701EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
 702
 703int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
 704				struct device_node *node)
 705{
 706	return parser_init(parser, node, "dma-ranges");
 707}
 708EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
 709#define of_dma_range_parser_init of_pci_dma_range_parser_init
 710
 711struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
 712						struct of_pci_range *range)
 713{
 714	int na = parser->na;
 715	int ns = parser->ns;
 716	int np = parser->pna + na + ns;
 717	int busflag_na = 0;
 718
 719	if (!range)
 720		return NULL;
 721
 722	if (!parser->range || parser->range + np > parser->end)
 723		return NULL;
 724
 725	range->flags = parser->bus->get_flags(parser->range);
 726
  727	/* An extra cell for resource flags */
 728	if (parser->bus->has_flags)
 729		busflag_na = 1;
 730
 731	range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
 732
 733	if (parser->dma)
 734		range->cpu_addr = of_translate_dma_address(parser->node,
 735				parser->range + na);
 736	else
 737		range->cpu_addr = of_translate_address(parser->node,
 738				parser->range + na);
 739	range->size = of_read_number(parser->range + parser->pna + na, ns);
 740
 741	parser->range += np;
 742
 743	/* Now consume following elements while they are contiguous */
 744	while (parser->range + np <= parser->end) {
 745		u32 flags = 0;
 746		u64 bus_addr, cpu_addr, size;
 747
 748		flags = parser->bus->get_flags(parser->range);
 749		bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
 750		if (parser->dma)
 751			cpu_addr = of_translate_dma_address(parser->node,
 752					parser->range + na);
 753		else
 754			cpu_addr = of_translate_address(parser->node,
 755					parser->range + na);
 756		size = of_read_number(parser->range + parser->pna + na, ns);
 757
 758		if (flags != range->flags)
 759			break;
 760		if (bus_addr != range->bus_addr + range->size ||
 761		    cpu_addr != range->cpu_addr + range->size)
 762			break;
 763
 764		range->size += size;
 765		parser->range += np;
 766	}
 767
 768	return range;
 769}
 770EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
 771
 772static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
 773			u64 size)
 774{
 775	u64 taddr;
 776	unsigned long port;
 777	struct device_node *host;
 778
 779	taddr = __of_translate_address(dev, of_get_parent,
 780				       in_addr, "ranges", &host);
 781	if (host) {
 782		/* host-specific port access */
 783		port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
 784		of_node_put(host);
 785	} else {
 786		/* memory-mapped I/O range */
 787		port = pci_address_to_pio(taddr);
 788	}
 789
 790	if (port == (unsigned long)-1)
 791		return OF_BAD_ADDR;
 792
 793	return port;
 794}
 795
 796static int __of_address_to_resource(struct device_node *dev, int index, int bar_no,
 797		struct resource *r)
 798{
 799	u64 taddr;
 800	const __be32	*addrp;
 801	u64		size;
 802	unsigned int	flags;
 803	const char	*name = NULL;
 804
 805	addrp = __of_get_address(dev, index, bar_no, &size, &flags);
 806	if (addrp == NULL)
 807		return -EINVAL;
 808
 809	/* Get optional "reg-names" property to add a name to a resource */
 810	if (index >= 0)
 811		of_property_read_string_index(dev, "reg-names",	index, &name);
 812
 813	if (flags & IORESOURCE_MEM)
 814		taddr = of_translate_address(dev, addrp);
 815	else if (flags & IORESOURCE_IO)
 816		taddr = of_translate_ioport(dev, addrp, size);
 817	else
 818		return -EINVAL;
 819
 820	if (taddr == OF_BAD_ADDR)
 821		return -EINVAL;
 822	memset(r, 0, sizeof(struct resource));
 823
 824	if (of_mmio_is_nonposted(dev))
 825		flags |= IORESOURCE_MEM_NONPOSTED;
 826
 827	r->start = taddr;
 828	r->end = taddr + size - 1;
 829	r->flags = flags;
 830	r->name = name ? name : dev->full_name;
 831
 832	return 0;
 833}
 834
 835/**
 836 * of_address_to_resource - Translate device tree address and return as resource
 837 * @dev:	Caller's Device Node
 838 * @index:	Index into the array
 839 * @r:		Pointer to resource array
 840 *
 841 * Note that if your address is a PIO address, the conversion will fail if
 842 * the physical address can't be internally converted to an IO token with
  843 * pci_address_to_pio(); that happens when it is called too early or when the
  844 * address cannot be matched to any host bridge IO space.
 845 */
 846int of_address_to_resource(struct device_node *dev, int index,
 847			   struct resource *r)
 848{
 849	return __of_address_to_resource(dev, index, -1, r);
 850}
 851EXPORT_SYMBOL_GPL(of_address_to_resource);
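A brief illustrative sketch (hypothetical helper): fetch the size of the Nth "reg" region of a node without mapping it.

static resource_size_t example_reg_size(struct device_node *np, int index)
{
	struct resource res;

	if (of_address_to_resource(np, index, &res))
		return 0;	/* missing or untranslatable entry */

	return resource_size(&res);
}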
 852
 853/**
 854 * of_iomap - Maps the memory mapped IO for a given device_node
 855 * @np:		the device whose io range will be mapped
 856 * @index:	index of the io range
 857 *
 858 * Returns a pointer to the mapped memory
 859 */
 860void __iomem *of_iomap(struct device_node *np, int index)
 861{
 862	struct resource res;
 863
 864	if (of_address_to_resource(np, index, &res))
 865		return NULL;
 866
 867	if (res.flags & IORESOURCE_MEM_NONPOSTED)
 868		return ioremap_np(res.start, resource_size(&res));
 869	else
 870		return ioremap(res.start, resource_size(&res));
 871}
 872EXPORT_SYMBOL(of_iomap);
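An illustrative sketch, assuming the first "reg" region starts with a readable ID register (helper name and register layout are hypothetical):

static u32 example_read_id(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);
	u32 id;

	if (!base)
		return 0;	/* translation or ioremap() failed */

	id = readl(base);	/* hypothetical ID register at offset 0x0 */
	iounmap(base);
	return id;
}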
 873
 874/*
 875 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 876 *			   for a given device_node
  877 * @np:		the device whose io range will be mapped
 878 * @index:	index of the io range
 879 * @name:	name "override" for the memory region request or NULL
 880 *
 881 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 882 * error code on failure. Usage example:
 883 *
 884 *	base = of_io_request_and_map(node, 0, "foo");
 885 *	if (IS_ERR(base))
 886 *		return PTR_ERR(base);
 887 */
 888void __iomem *of_io_request_and_map(struct device_node *np, int index,
 889				    const char *name)
 890{
 891	struct resource res;
 892	void __iomem *mem;
 893
 894	if (of_address_to_resource(np, index, &res))
 895		return IOMEM_ERR_PTR(-EINVAL);
 896
 897	if (!name)
 898		name = res.name;
 899	if (!request_mem_region(res.start, resource_size(&res), name))
 900		return IOMEM_ERR_PTR(-EBUSY);
 901
 902	if (res.flags & IORESOURCE_MEM_NONPOSTED)
 903		mem = ioremap_np(res.start, resource_size(&res));
 904	else
 905		mem = ioremap(res.start, resource_size(&res));
 906
 907	if (!mem) {
 908		release_mem_region(res.start, resource_size(&res));
 909		return IOMEM_ERR_PTR(-ENOMEM);
 910	}
 911
 912	return mem;
 913}
 914EXPORT_SYMBOL(of_io_request_and_map);
 915
 916#ifdef CONFIG_HAS_DMA
 917/**
 918 * of_dma_get_range - Get DMA range info and put it into a map array
 919 * @np:		device node to get DMA range info
 920 * @map:	dma range structure to return
 921 *
 922 * Look in bottom up direction for the first "dma-ranges" property
 923 * and parse it.  Put the information into a DMA offset map array.
 924 *
 925 * dma-ranges format:
 926 *	DMA addr (dma_addr)	: naddr cells
 927 *	CPU addr (phys_addr_t)	: pna cells
 928 *	size			: nsize cells
 929 *
 930 * It returns -ENODEV if "dma-ranges" property was not found for this
 931 * device in the DT.
 932 */
 933int of_dma_get_range(struct device_node *np, const struct bus_dma_region **map)
 934{
 935	struct device_node *node = of_node_get(np);
 936	const __be32 *ranges = NULL;
 937	bool found_dma_ranges = false;
 938	struct of_range_parser parser;
 939	struct of_range range;
 940	struct bus_dma_region *r;
 941	int len, num_ranges = 0;
 942	int ret = 0;
 943
 944	while (node) {
 945		ranges = of_get_property(node, "dma-ranges", &len);
 946
 947		/* Ignore empty ranges, they imply no translation required */
 948		if (ranges && len > 0)
 949			break;
 950
 951		/* Once we find 'dma-ranges', then a missing one is an error */
 952		if (found_dma_ranges && !ranges) {
 953			ret = -ENODEV;
 954			goto out;
 955		}
 956		found_dma_ranges = true;
 957
 958		node = of_get_next_dma_parent(node);
 959	}
 960
 961	if (!node || !ranges) {
 962		pr_debug("no dma-ranges found for node(%pOF)\n", np);
 963		ret = -ENODEV;
 964		goto out;
 965	}
 966
 967	of_dma_range_parser_init(&parser, node);
 968	for_each_of_range(&parser, &range) {
 969		if (range.cpu_addr == OF_BAD_ADDR) {
 970			pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
 971			       range.bus_addr, node);
 972			continue;
 973		}
 974		num_ranges++;
 975	}
 976
 977	if (!num_ranges) {
 978		ret = -EINVAL;
 979		goto out;
 980	}
 981
 982	r = kcalloc(num_ranges + 1, sizeof(*r), GFP_KERNEL);
 983	if (!r) {
 984		ret = -ENOMEM;
 985		goto out;
 986	}
 987
 988	/*
 989	 * Record all info in the generic DMA ranges array for struct device,
 990	 * returning an error if we don't find any parsable ranges.
 991	 */
 992	*map = r;
 993	of_dma_range_parser_init(&parser, node);
 994	for_each_of_range(&parser, &range) {
 995		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
 996			 range.bus_addr, range.cpu_addr, range.size);
 997		if (range.cpu_addr == OF_BAD_ADDR)
 998			continue;
 999		r->cpu_start = range.cpu_addr;
1000		r->dma_start = range.bus_addr;
1001		r->size = range.size;
1002		r->offset = range.cpu_addr - range.bus_addr;
1003		r++;
1004	}
1005out:
1006	of_node_put(node);
1007	return ret;
1008}
1009#endif /* CONFIG_HAS_DMA */
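To illustrate what the map built by of_dma_get_range() encodes, here is an editorial sketch of a lookup over the returned bus_dma_region array (the real conversion is done by the dma-direct code; this helper is hypothetical). The array is terminated by the extra all-zero entry allocated above.

static phys_addr_t example_dma_to_phys(const struct bus_dma_region *map,
				       dma_addr_t dma_addr)
{
	const struct bus_dma_region *m;

	/* offset was stored as cpu_addr - bus_addr for each range */
	for (m = map; m->size; m++)
		if (dma_addr >= m->dma_start &&
		    dma_addr - m->dma_start < m->size)
			return (phys_addr_t)(dma_addr + m->offset);

	return (phys_addr_t)-1;	/* not covered by any range */
}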
1010
1011/**
1012 * of_dma_get_max_cpu_address - Gets highest CPU address suitable for DMA
1013 * @np: The node to start searching from or NULL to start from the root
1014 *
1015 * Gets the highest CPU physical address that is addressable by all DMA masters
1016 * in the sub-tree pointed by np, or the whole tree if NULL is passed. If no
1017 * DMA constrained device is found, it returns PHYS_ADDR_MAX.
1018 */
1019phys_addr_t __init of_dma_get_max_cpu_address(struct device_node *np)
1020{
1021	phys_addr_t max_cpu_addr = PHYS_ADDR_MAX;
1022	struct of_range_parser parser;
1023	phys_addr_t subtree_max_addr;
1024	struct device_node *child;
1025	struct of_range range;
1026	const __be32 *ranges;
1027	u64 cpu_end = 0;
1028	int len;
1029
1030	if (!np)
1031		np = of_root;
1032
1033	ranges = of_get_property(np, "dma-ranges", &len);
1034	if (ranges && len) {
1035		of_dma_range_parser_init(&parser, np);
1036		for_each_of_range(&parser, &range)
1037			if (range.cpu_addr + range.size > cpu_end)
1038				cpu_end = range.cpu_addr + range.size - 1;
1039
1040		if (max_cpu_addr > cpu_end)
1041			max_cpu_addr = cpu_end;
1042	}
1043
1044	for_each_available_child_of_node(np, child) {
1045		subtree_max_addr = of_dma_get_max_cpu_address(child);
1046		if (max_cpu_addr > subtree_max_addr)
1047			max_cpu_addr = subtree_max_addr;
1048	}
1049
1050	return max_cpu_addr;
1051}
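A hedged example of the intended use (arch early-init style; the helper is illustrative): check whether every DMA master described in the tree can reach the end of RAM.

static bool __init example_all_ram_dma_addressable(phys_addr_t ram_end)
{
	/* PHYS_ADDR_MAX is returned when no DMA-constrained master exists */
	return of_dma_get_max_cpu_address(NULL) >= ram_end;
}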
1052
1053/**
1054 * of_dma_is_coherent - Check if device is coherent
1055 * @np:	device node
1056 *
1057 * It returns true if "dma-coherent" property was found
1058 * for this device in the DT, or if DMA is coherent by
1059 * default for OF devices on the current platform and no
1060 * "dma-noncoherent" property was found for this device.
1061 */
1062bool of_dma_is_coherent(struct device_node *np)
1063{
1064	struct device_node *node;
1065	bool is_coherent = IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT);
1066
1067	node = of_node_get(np);
1068
1069	while (node) {
1070		if (of_property_read_bool(node, "dma-coherent")) {
1071			is_coherent = true;
1072			break;
1073		}
1074		if (of_property_read_bool(node, "dma-noncoherent")) {
1075			is_coherent = false;
1076			break;
1077		}
1078		node = of_get_next_dma_parent(node);
1079	}
1080	of_node_put(node);
1081	return is_coherent;
1082}
1083EXPORT_SYMBOL_GPL(of_dma_is_coherent);
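A short illustrative sketch of how a driver might report the result (hypothetical helper; dev->of_node is assumed to be valid):

static void example_report_dma_coherency(struct device *dev)
{
	if (of_dma_is_coherent(dev->of_node))
		dev_info(dev, "DMA is cache-coherent\n");
	else
		dev_info(dev, "DMA is not cache-coherent\n");
}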
1084
1085/**
1086 * of_mmio_is_nonposted - Check if device uses non-posted MMIO
1087 * @np:	device node
1088 *
1089 * Returns true if the "nonposted-mmio" property was found for
1090 * the device's bus.
1091 *
1092 * This is currently only enabled on builds that support Apple ARM devices, as
1093 * an optimization.
1094 */
1095static bool of_mmio_is_nonposted(struct device_node *np)
1096{
1097	struct device_node *parent;
1098	bool nonposted;
1099
1100	if (!IS_ENABLED(CONFIG_ARCH_APPLE))
1101		return false;
1102
1103	parent = of_get_parent(np);
1104	if (!parent)
1105		return false;
1106
1107	nonposted = of_property_read_bool(parent, "nonposted-mmio");
1108
1109	of_node_put(parent);
1110	return nonposted;
1111}
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt)	"OF: " fmt
   3
   4#include <linux/device.h>
   5#include <linux/fwnode.h>
   6#include <linux/io.h>
   7#include <linux/ioport.h>
   8#include <linux/logic_pio.h>
   9#include <linux/module.h>
  10#include <linux/of_address.h>
  11#include <linux/pci.h>
  12#include <linux/pci_regs.h>
  13#include <linux/sizes.h>
  14#include <linux/slab.h>
  15#include <linux/string.h>
 
  16
  17#include "of_private.h"
  18
  19/* Max address size we deal with */
  20#define OF_MAX_ADDR_CELLS	4
  21#define OF_CHECK_ADDR_COUNT(na)	((na) > 0 && (na) <= OF_MAX_ADDR_CELLS)
  22#define OF_CHECK_COUNTS(na, ns)	(OF_CHECK_ADDR_COUNT(na) && (ns) > 0)
  23
  24static struct of_bus *of_match_bus(struct device_node *np);
  25static int __of_address_to_resource(struct device_node *dev,
  26		const __be32 *addrp, u64 size, unsigned int flags,
  27		const char *name, struct resource *r);
  28
  29/* Debug utility */
  30#ifdef DEBUG
  31static void of_dump_addr(const char *s, const __be32 *addr, int na)
  32{
  33	pr_debug("%s", s);
  34	while (na--)
  35		pr_cont(" %08x", be32_to_cpu(*(addr++)));
  36	pr_cont("\n");
  37}
  38#else
  39static void of_dump_addr(const char *s, const __be32 *addr, int na) { }
  40#endif
  41
  42/* Callbacks for bus specific translators */
  43struct of_bus {
  44	const char	*name;
  45	const char	*addresses;
  46	int		(*match)(struct device_node *parent);
  47	void		(*count_cells)(struct device_node *child,
  48				       int *addrc, int *sizec);
  49	u64		(*map)(__be32 *addr, const __be32 *range,
  50				int na, int ns, int pna);
  51	int		(*translate)(__be32 *addr, u64 offset, int na);
  52	bool	has_flags;
  53	unsigned int	(*get_flags)(const __be32 *addr);
  54};
  55
  56/*
  57 * Default translator (generic bus)
  58 */
  59
  60static void of_bus_default_count_cells(struct device_node *dev,
  61				       int *addrc, int *sizec)
  62{
  63	if (addrc)
  64		*addrc = of_n_addr_cells(dev);
  65	if (sizec)
  66		*sizec = of_n_size_cells(dev);
  67}
  68
  69static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
  70		int na, int ns, int pna)
  71{
  72	u64 cp, s, da;
  73
  74	cp = of_read_number(range, na);
  75	s  = of_read_number(range + na + pna, ns);
  76	da = of_read_number(addr, na);
  77
  78	pr_debug("default map, cp=%llx, s=%llx, da=%llx\n",
  79		 (unsigned long long)cp, (unsigned long long)s,
  80		 (unsigned long long)da);
  81
  82	if (da < cp || da >= (cp + s))
  83		return OF_BAD_ADDR;
  84	return da - cp;
  85}
  86
  87static int of_bus_default_translate(__be32 *addr, u64 offset, int na)
  88{
  89	u64 a = of_read_number(addr, na);
  90	memset(addr, 0, na * 4);
  91	a += offset;
  92	if (na > 1)
  93		addr[na - 2] = cpu_to_be32(a >> 32);
  94	addr[na - 1] = cpu_to_be32(a & 0xffffffffu);
  95
  96	return 0;
  97}
  98
  99static unsigned int of_bus_default_get_flags(const __be32 *addr)
 100{
 101	return IORESOURCE_MEM;
 102}
 103
 104#ifdef CONFIG_PCI
 105static unsigned int of_bus_pci_get_flags(const __be32 *addr)
 106{
 107	unsigned int flags = 0;
 108	u32 w = be32_to_cpup(addr);
 109
 110	if (!IS_ENABLED(CONFIG_PCI))
 111		return 0;
 112
 113	switch((w >> 24) & 0x03) {
 114	case 0x01:
 115		flags |= IORESOURCE_IO;
 116		break;
 117	case 0x02: /* 32 bits */
 
 
 
 118	case 0x03: /* 64 bits */
 119		flags |= IORESOURCE_MEM;
 120		break;
 121	}
 122	if (w & 0x40000000)
 123		flags |= IORESOURCE_PREFETCH;
 124	return flags;
 125}
 126
 127/*
 128 * PCI bus specific translator
 129 */
 130
 131static bool of_node_is_pcie(struct device_node *np)
 132{
 133	bool is_pcie = of_node_name_eq(np, "pcie");
 134
 135	if (is_pcie)
 136		pr_warn_once("%pOF: Missing device_type\n", np);
 137
 138	return is_pcie;
 139}
 140
 141static int of_bus_pci_match(struct device_node *np)
 142{
 143	/*
 144 	 * "pciex" is PCI Express
 145	 * "vci" is for the /chaos bridge on 1st-gen PCI powermacs
 146	 * "ht" is hypertransport
 147	 *
 148	 * If none of the device_type match, and that the node name is
 149	 * "pcie", accept the device as PCI (with a warning).
 150	 */
 151	return of_node_is_type(np, "pci") || of_node_is_type(np, "pciex") ||
 152		of_node_is_type(np, "vci") || of_node_is_type(np, "ht") ||
 153		of_node_is_pcie(np);
 154}
 155
 156static void of_bus_pci_count_cells(struct device_node *np,
 157				   int *addrc, int *sizec)
 158{
 159	if (addrc)
 160		*addrc = 3;
 161	if (sizec)
 162		*sizec = 2;
 163}
 164
 165static u64 of_bus_pci_map(__be32 *addr, const __be32 *range, int na, int ns,
 166		int pna)
 167{
 168	u64 cp, s, da;
 169	unsigned int af, rf;
 170
 171	af = of_bus_pci_get_flags(addr);
 172	rf = of_bus_pci_get_flags(range);
 173
 174	/* Check address type match */
 175	if ((af ^ rf) & (IORESOURCE_MEM | IORESOURCE_IO))
 176		return OF_BAD_ADDR;
 177
 178	/* Read address values, skipping high cell */
 179	cp = of_read_number(range + 1, na - 1);
 180	s  = of_read_number(range + na + pna, ns);
 181	da = of_read_number(addr + 1, na - 1);
 182
 183	pr_debug("PCI map, cp=%llx, s=%llx, da=%llx\n",
 184		 (unsigned long long)cp, (unsigned long long)s,
 185		 (unsigned long long)da);
 186
 187	if (da < cp || da >= (cp + s))
 188		return OF_BAD_ADDR;
 189	return da - cp;
 190}
 191
 192static int of_bus_pci_translate(__be32 *addr, u64 offset, int na)
 193{
 194	return of_bus_default_translate(addr + 1, offset, na - 1);
 195}
 196
 197const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size,
 198			unsigned int *flags)
 199{
 200	const __be32 *prop;
 201	unsigned int psize;
 202	struct device_node *parent;
 203	struct of_bus *bus;
 204	int onesize, i, na, ns;
 205
 206	/* Get parent & match bus type */
 207	parent = of_get_parent(dev);
 208	if (parent == NULL)
 209		return NULL;
 210	bus = of_match_bus(parent);
 211	if (strcmp(bus->name, "pci")) {
 212		of_node_put(parent);
 213		return NULL;
 214	}
 215	bus->count_cells(dev, &na, &ns);
 216	of_node_put(parent);
 217	if (!OF_CHECK_ADDR_COUNT(na))
 218		return NULL;
 219
 220	/* Get "reg" or "assigned-addresses" property */
 221	prop = of_get_property(dev, bus->addresses, &psize);
 222	if (prop == NULL)
 223		return NULL;
 224	psize /= 4;
 225
 226	onesize = na + ns;
 227	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) {
 228		u32 val = be32_to_cpu(prop[0]);
 229		if ((val & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) {
 230			if (size)
 231				*size = of_read_number(prop + na, ns);
 232			if (flags)
 233				*flags = bus->get_flags(prop);
 234			return prop;
 235		}
 236	}
 237	return NULL;
 238}
 239EXPORT_SYMBOL(of_get_pci_address);
 240
 241int of_pci_address_to_resource(struct device_node *dev, int bar,
 242			       struct resource *r)
 243{
 244	const __be32	*addrp;
 245	u64		size;
 246	unsigned int	flags;
 247
 248	addrp = of_get_pci_address(dev, bar, &size, &flags);
 249	if (addrp == NULL)
 250		return -EINVAL;
 251	return __of_address_to_resource(dev, addrp, size, flags, NULL, r);
 252}
 253EXPORT_SYMBOL_GPL(of_pci_address_to_resource);
 254
 255/*
 256 * of_pci_range_to_resource - Create a resource from an of_pci_range
 257 * @range:	the PCI range that describes the resource
 258 * @np:		device node where the range belongs to
 259 * @res:	pointer to a valid resource that will be updated to
 260 *              reflect the values contained in the range.
 261 *
 262 * Returns EINVAL if the range cannot be converted to resource.
 263 *
 264 * Note that if the range is an IO range, the resource will be converted
 265 * using pci_address_to_pio() which can fail if it is called too early or
 266 * if the range cannot be matched to any host bridge IO space (our case here).
 267 * To guard against that we try to register the IO range first.
 268 * If that fails we know that pci_address_to_pio() will do too.
 269 */
 270int of_pci_range_to_resource(struct of_pci_range *range,
 271			     struct device_node *np, struct resource *res)
 272{
 273	int err;
 274	res->flags = range->flags;
 275	res->parent = res->child = res->sibling = NULL;
 276	res->name = np->full_name;
 277
 
 
 
 278	if (res->flags & IORESOURCE_IO) {
 279		unsigned long port;
 280		err = pci_register_io_range(&np->fwnode, range->cpu_addr,
 281				range->size);
 282		if (err)
 283			goto invalid_range;
 284		port = pci_address_to_pio(range->cpu_addr);
 285		if (port == (unsigned long)-1) {
 286			err = -EINVAL;
 287			goto invalid_range;
 288		}
 289		res->start = port;
 290	} else {
 291		if ((sizeof(resource_size_t) < 8) &&
 292		    upper_32_bits(range->cpu_addr)) {
 293			err = -EINVAL;
 294			goto invalid_range;
 295		}
 296
 297		res->start = range->cpu_addr;
 298	}
 299	res->end = res->start + range->size - 1;
 300	return 0;
 301
 302invalid_range:
 303	res->start = (resource_size_t)OF_BAD_ADDR;
 304	res->end = (resource_size_t)OF_BAD_ADDR;
 305	return err;
 306}
 307EXPORT_SYMBOL(of_pci_range_to_resource);
 308#endif /* CONFIG_PCI */
 309
 310/*
 311 * ISA bus specific translator
 312 */
 313
 314static int of_bus_isa_match(struct device_node *np)
 315{
 316	return of_node_name_eq(np, "isa");
 317}
 318
 319static void of_bus_isa_count_cells(struct device_node *child,
 320				   int *addrc, int *sizec)
 321{
 322	if (addrc)
 323		*addrc = 2;
 324	if (sizec)
 325		*sizec = 1;
 326}
 327
 328static u64 of_bus_isa_map(__be32 *addr, const __be32 *range, int na, int ns,
 329		int pna)
 330{
 331	u64 cp, s, da;
 332
 333	/* Check address type match */
 334	if ((addr[0] ^ range[0]) & cpu_to_be32(1))
 335		return OF_BAD_ADDR;
 336
 337	/* Read address values, skipping high cell */
 338	cp = of_read_number(range + 1, na - 1);
 339	s  = of_read_number(range + na + pna, ns);
 340	da = of_read_number(addr + 1, na - 1);
 341
 342	pr_debug("ISA map, cp=%llx, s=%llx, da=%llx\n",
 343		 (unsigned long long)cp, (unsigned long long)s,
 344		 (unsigned long long)da);
 345
 346	if (da < cp || da >= (cp + s))
 347		return OF_BAD_ADDR;
 348	return da - cp;
 349}
 350
 351static int of_bus_isa_translate(__be32 *addr, u64 offset, int na)
 352{
 353	return of_bus_default_translate(addr + 1, offset, na - 1);
 354}
 355
 356static unsigned int of_bus_isa_get_flags(const __be32 *addr)
 357{
 358	unsigned int flags = 0;
 359	u32 w = be32_to_cpup(addr);
 360
 361	if (w & 1)
 362		flags |= IORESOURCE_IO;
 363	else
 364		flags |= IORESOURCE_MEM;
 365	return flags;
 366}
 367
 368/*
 369 * Array of bus specific translators
 370 */
 371
 372static struct of_bus of_busses[] = {
 373#ifdef CONFIG_PCI
 374	/* PCI */
 375	{
 376		.name = "pci",
 377		.addresses = "assigned-addresses",
 378		.match = of_bus_pci_match,
 379		.count_cells = of_bus_pci_count_cells,
 380		.map = of_bus_pci_map,
 381		.translate = of_bus_pci_translate,
 382		.has_flags = true,
 383		.get_flags = of_bus_pci_get_flags,
 384	},
 385#endif /* CONFIG_PCI */
 386	/* ISA */
 387	{
 388		.name = "isa",
 389		.addresses = "reg",
 390		.match = of_bus_isa_match,
 391		.count_cells = of_bus_isa_count_cells,
 392		.map = of_bus_isa_map,
 393		.translate = of_bus_isa_translate,
 394		.has_flags = true,
 395		.get_flags = of_bus_isa_get_flags,
 396	},
 397	/* Default */
 398	{
 399		.name = "default",
 400		.addresses = "reg",
 401		.match = NULL,
 402		.count_cells = of_bus_default_count_cells,
 403		.map = of_bus_default_map,
 404		.translate = of_bus_default_translate,
 405		.get_flags = of_bus_default_get_flags,
 406	},
 407};
 408
 409static struct of_bus *of_match_bus(struct device_node *np)
 410{
 411	int i;
 412
 413	for (i = 0; i < ARRAY_SIZE(of_busses); i++)
 414		if (!of_busses[i].match || of_busses[i].match(np))
 415			return &of_busses[i];
 416	BUG();
 417	return NULL;
 418}
 419
 420static int of_empty_ranges_quirk(struct device_node *np)
 421{
 422	if (IS_ENABLED(CONFIG_PPC)) {
 423		/* To save cycles, we cache the result for global "Mac" setting */
 424		static int quirk_state = -1;
 425
 426		/* PA-SEMI sdc DT bug */
 427		if (of_device_is_compatible(np, "1682m-sdc"))
 428			return true;
 429
 430		/* Make quirk cached */
 431		if (quirk_state < 0)
 432			quirk_state =
 433				of_machine_is_compatible("Power Macintosh") ||
 434				of_machine_is_compatible("MacRISC");
 435		return quirk_state;
 436	}
 437	return false;
 438}
 439
 440static int of_translate_one(struct device_node *parent, struct of_bus *bus,
 441			    struct of_bus *pbus, __be32 *addr,
 442			    int na, int ns, int pna, const char *rprop)
 443{
 444	const __be32 *ranges;
 445	unsigned int rlen;
 446	int rone;
 447	u64 offset = OF_BAD_ADDR;
 448
 449	/*
 450	 * Normally, an absence of a "ranges" property means we are
 451	 * crossing a non-translatable boundary, and thus the addresses
 452	 * below the current cannot be converted to CPU physical ones.
 453	 * Unfortunately, while this is very clear in the spec, it's not
 454	 * what Apple understood, and they do have things like /uni-n or
 455	 * /ht nodes with no "ranges" property and a lot of perfectly
 456	 * useable mapped devices below them. Thus we treat the absence of
 457	 * "ranges" as equivalent to an empty "ranges" property which means
 458	 * a 1:1 translation at that level. It's up to the caller not to try
 459	 * to translate addresses that aren't supposed to be translated in
 460	 * the first place. --BenH.
 461	 *
 462	 * As far as we know, this damage only exists on Apple machines, so
 463	 * This code is only enabled on powerpc. --gcl
 464	 *
 465	 * This quirk also applies for 'dma-ranges' which frequently exist in
 466	 * child nodes without 'dma-ranges' in the parent nodes. --RobH
 467	 */
 468	ranges = of_get_property(parent, rprop, &rlen);
 469	if (ranges == NULL && !of_empty_ranges_quirk(parent) &&
 470	    strcmp(rprop, "dma-ranges")) {
 471		pr_debug("no ranges; cannot translate\n");
 472		return 1;
 473	}
 474	if (ranges == NULL || rlen == 0) {
 475		offset = of_read_number(addr, na);
 476		memset(addr, 0, pna * 4);
 477		pr_debug("empty ranges; 1:1 translation\n");
 478		goto finish;
 479	}
 480
 481	pr_debug("walking ranges...\n");
 482
 483	/* Now walk through the ranges */
 484	rlen /= 4;
 485	rone = na + pna + ns;
 486	for (; rlen >= rone; rlen -= rone, ranges += rone) {
 487		offset = bus->map(addr, ranges, na, ns, pna);
 488		if (offset != OF_BAD_ADDR)
 489			break;
 490	}
 491	if (offset == OF_BAD_ADDR) {
 492		pr_debug("not found !\n");
 493		return 1;
 494	}
 495	memcpy(addr, ranges + na, 4 * pna);
 496
 497 finish:
 498	of_dump_addr("parent translation for:", addr, pna);
 499	pr_debug("with offset: %llx\n", (unsigned long long)offset);
 500
 501	/* Translate it into parent bus space */
 502	return pbus->translate(addr, offset, pna);
 503}
 504
 505/*
 506 * Translate an address from the device-tree into a CPU physical address,
 507 * this walks up the tree and applies the various bus mappings on the
 508 * way.
 509 *
 510 * Note: We consider that crossing any level with #size-cells == 0 to mean
 511 * that translation is impossible (that is we are not dealing with a value
 512 * that can be mapped to a cpu physical address). This is not really specified
 513 * that way, but this is traditionally the way IBM at least do things
 514 *
 515 * Whenever the translation fails, the *host pointer will be set to the
 516 * device that had registered logical PIO mapping, and the return code is
 517 * relative to that node.
 518 */
 519static u64 __of_translate_address(struct device_node *dev,
 520				  struct device_node *(*get_parent)(const struct device_node *),
 521				  const __be32 *in_addr, const char *rprop,
 522				  struct device_node **host)
 523{
 524	struct device_node *parent = NULL;
 525	struct of_bus *bus, *pbus;
 526	__be32 addr[OF_MAX_ADDR_CELLS];
 527	int na, ns, pna, pns;
 528	u64 result = OF_BAD_ADDR;
 529
 530	pr_debug("** translation for device %pOF **\n", dev);
 531
 532	/* Increase refcount at current level */
 533	of_node_get(dev);
 534
 535	*host = NULL;
 536	/* Get parent & match bus type */
 537	parent = get_parent(dev);
 538	if (parent == NULL)
 539		goto bail;
 540	bus = of_match_bus(parent);
 541
 542	/* Count address cells & copy address locally */
 543	bus->count_cells(dev, &na, &ns);
 544	if (!OF_CHECK_COUNTS(na, ns)) {
 545		pr_debug("Bad cell count for %pOF\n", dev);
 546		goto bail;
 547	}
 548	memcpy(addr, in_addr, na * 4);
 549
 550	pr_debug("bus is %s (na=%d, ns=%d) on %pOF\n",
 551	    bus->name, na, ns, parent);
 552	of_dump_addr("translating address:", addr, na);
 553
 554	/* Translate */
 555	for (;;) {
 556		struct logic_pio_hwaddr *iorange;
 557
 558		/* Switch to parent bus */
 559		of_node_put(dev);
 560		dev = parent;
 561		parent = get_parent(dev);
 562
 563		/* If root, we have finished */
 564		if (parent == NULL) {
 565			pr_debug("reached root node\n");
 566			result = of_read_number(addr, na);
 567			break;
 568		}
 569
 570		/*
 571		 * For indirectIO device which has no ranges property, get
 572		 * the address from reg directly.
 573		 */
 574		iorange = find_io_range_by_fwnode(&dev->fwnode);
 575		if (iorange && (iorange->flags != LOGIC_PIO_CPU_MMIO)) {
 576			result = of_read_number(addr + 1, na - 1);
 577			pr_debug("indirectIO matched(%pOF) 0x%llx\n",
 578				 dev, result);
 579			*host = of_node_get(dev);
 580			break;
 581		}
 582
 583		/* Get new parent bus and counts */
 584		pbus = of_match_bus(parent);
 585		pbus->count_cells(dev, &pna, &pns);
 586		if (!OF_CHECK_COUNTS(pna, pns)) {
 587			pr_err("Bad cell count for %pOF\n", dev);
 588			break;
 589		}
 590
 591		pr_debug("parent bus is %s (na=%d, ns=%d) on %pOF\n",
 592		    pbus->name, pna, pns, parent);
 593
 594		/* Apply bus translation */
 595		if (of_translate_one(dev, bus, pbus, addr, na, ns, pna, rprop))
 596			break;
 597
 598		/* Complete the move up one level */
 599		na = pna;
 600		ns = pns;
 601		bus = pbus;
 602
 603		of_dump_addr("one level translation:", addr, na);
 604	}
 605 bail:
 606	of_node_put(parent);
 607	of_node_put(dev);
 608
 609	return result;
 610}
 611
 612u64 of_translate_address(struct device_node *dev, const __be32 *in_addr)
 613{
 614	struct device_node *host;
 615	u64 ret;
 616
 617	ret = __of_translate_address(dev, of_get_parent,
 618				     in_addr, "ranges", &host);
 619	if (host) {
 620		of_node_put(host);
 621		return OF_BAD_ADDR;
 622	}
 623
 624	return ret;
 625}
 626EXPORT_SYMBOL(of_translate_address);
 627
 628static struct device_node *__of_get_dma_parent(const struct device_node *np)
 
 629{
 630	struct of_phandle_args args;
 631	int ret, index;
 632
 633	index = of_property_match_string(np, "interconnect-names", "dma-mem");
 634	if (index < 0)
 635		return of_get_parent(np);
 636
 637	ret = of_parse_phandle_with_args(np, "interconnects",
 638					 "#interconnect-cells",
 639					 index, &args);
 640	if (ret < 0)
 641		return of_get_parent(np);
 642
 643	return of_node_get(args.np);
 644}
 
 645
 646static struct device_node *of_get_next_dma_parent(struct device_node *np)
 647{
 648	struct device_node *parent;
 649
 650	parent = __of_get_dma_parent(np);
 651	of_node_put(np);
 652
 653	return parent;
 654}
 655
 656u64 of_translate_dma_address(struct device_node *dev, const __be32 *in_addr)
 657{
 658	struct device_node *host;
 659	u64 ret;
 660
 661	ret = __of_translate_address(dev, __of_get_dma_parent,
 662				     in_addr, "dma-ranges", &host);
 663
 664	if (host) {
 665		of_node_put(host);
 666		return OF_BAD_ADDR;
 667	}
 668
 669	return ret;
 670}
 671EXPORT_SYMBOL(of_translate_dma_address);
 672
 673const __be32 *of_get_address(struct device_node *dev, int index, u64 *size,
 674		    unsigned int *flags)
 675{
 676	const __be32 *prop;
 677	unsigned int psize;
 678	struct device_node *parent;
 679	struct of_bus *bus;
 680	int onesize, i, na, ns;
 681
 682	/* Get parent & match bus type */
 683	parent = of_get_parent(dev);
 684	if (parent == NULL)
 685		return NULL;
 686	bus = of_match_bus(parent);
 
 
 
 
 687	bus->count_cells(dev, &na, &ns);
 688	of_node_put(parent);
 689	if (!OF_CHECK_ADDR_COUNT(na))
 690		return NULL;
 691
 692	/* Get "reg" or "assigned-addresses" property */
 693	prop = of_get_property(dev, bus->addresses, &psize);
 694	if (prop == NULL)
 695		return NULL;
 696	psize /= 4;
 697
 698	onesize = na + ns;
 699	for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++)
 700		if (i == index) {
 
 
 
 701			if (size)
 702				*size = of_read_number(prop + na, ns);
 703			if (flags)
 704				*flags = bus->get_flags(prop);
 705			return prop;
 706		}
 
 707	return NULL;
 708}
 709EXPORT_SYMBOL(of_get_address);
 710
 711static int parser_init(struct of_pci_range_parser *parser,
 712			struct device_node *node, const char *name)
 713{
 714	int rlen;
 715
 716	parser->node = node;
 717	parser->pna = of_n_addr_cells(node);
 718	parser->na = of_bus_n_addr_cells(node);
 719	parser->ns = of_bus_n_size_cells(node);
 720	parser->dma = !strcmp(name, "dma-ranges");
 721	parser->bus = of_match_bus(node);
 722
 723	parser->range = of_get_property(node, name, &rlen);
 724	if (parser->range == NULL)
 725		return -ENOENT;
 726
 727	parser->end = parser->range + rlen / sizeof(__be32);
 728
 729	return 0;
 730}
 731
 732int of_pci_range_parser_init(struct of_pci_range_parser *parser,
 733				struct device_node *node)
 734{
 735	return parser_init(parser, node, "ranges");
 736}
 737EXPORT_SYMBOL_GPL(of_pci_range_parser_init);
 738
 739int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
 740				struct device_node *node)
 741{
 742	return parser_init(parser, node, "dma-ranges");
 743}
 744EXPORT_SYMBOL_GPL(of_pci_dma_range_parser_init);
 745#define of_dma_range_parser_init of_pci_dma_range_parser_init
 746
 747struct of_pci_range *of_pci_range_parser_one(struct of_pci_range_parser *parser,
 748						struct of_pci_range *range)
 749{
 750	int na = parser->na;
 751	int ns = parser->ns;
 752	int np = parser->pna + na + ns;
 753	int busflag_na = 0;
 754
 755	if (!range)
 756		return NULL;
 757
 758	if (!parser->range || parser->range + np > parser->end)
 759		return NULL;
 760
 761	range->flags = parser->bus->get_flags(parser->range);
 762
 763	/* A extra cell for resource flags */
 764	if (parser->bus->has_flags)
 765		busflag_na = 1;
 766
 767	range->bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
 768
 769	if (parser->dma)
 770		range->cpu_addr = of_translate_dma_address(parser->node,
 771				parser->range + na);
 772	else
 773		range->cpu_addr = of_translate_address(parser->node,
 774				parser->range + na);
 775	range->size = of_read_number(parser->range + parser->pna + na, ns);
 776
 777	parser->range += np;
 778
 779	/* Now consume following elements while they are contiguous */
 780	while (parser->range + np <= parser->end) {
 781		u32 flags = 0;
 782		u64 bus_addr, cpu_addr, size;
 783
 784		flags = parser->bus->get_flags(parser->range);
 785		bus_addr = of_read_number(parser->range + busflag_na, na - busflag_na);
 786		if (parser->dma)
 787			cpu_addr = of_translate_dma_address(parser->node,
 788					parser->range + na);
 789		else
 790			cpu_addr = of_translate_address(parser->node,
 791					parser->range + na);
 792		size = of_read_number(parser->range + parser->pna + na, ns);
 793
 794		if (flags != range->flags)
 795			break;
 796		if (bus_addr != range->bus_addr + range->size ||
 797		    cpu_addr != range->cpu_addr + range->size)
 798			break;
 799
 800		range->size += size;
 801		parser->range += np;
 802	}
 803
 804	return range;
 805}
 806EXPORT_SYMBOL_GPL(of_pci_range_parser_one);
 807
 808static u64 of_translate_ioport(struct device_node *dev, const __be32 *in_addr,
 809			u64 size)
 810{
 811	u64 taddr;
 812	unsigned long port;
 813	struct device_node *host;
 814
 815	taddr = __of_translate_address(dev, of_get_parent,
 816				       in_addr, "ranges", &host);
 817	if (host) {
 818		/* host-specific port access */
 819		port = logic_pio_trans_hwaddr(&host->fwnode, taddr, size);
 820		of_node_put(host);
 821	} else {
 822		/* memory-mapped I/O range */
 823		port = pci_address_to_pio(taddr);
 824	}
 825
 826	if (port == (unsigned long)-1)
 827		return OF_BAD_ADDR;
 828
 829	return port;
 830}
 831
 832static int __of_address_to_resource(struct device_node *dev,
 833		const __be32 *addrp, u64 size, unsigned int flags,
 834		const char *name, struct resource *r)
 835{
 836	u64 taddr;
 837
 838	if (flags & IORESOURCE_MEM)
 839		taddr = of_translate_address(dev, addrp);
 840	else if (flags & IORESOURCE_IO)
 841		taddr = of_translate_ioport(dev, addrp, size);
 842	else
 843		return -EINVAL;
 844
 845	if (taddr == OF_BAD_ADDR)
 846		return -EINVAL;
 847	memset(r, 0, sizeof(struct resource));
 848
 849	r->start = taddr;
 850	r->end = taddr + size - 1;
 851	r->flags = flags;
 852	r->name = name ? name : dev->full_name;
 853
 854	return 0;
 855}
 856
 857/**
 858 * of_address_to_resource - Translate device tree address and return as resource
 859 *
  860 * Note that if your address is a PIO address, the conversion will fail if
  861 * the physical address can't be internally converted to an IO token with
  862 * pci_address_to_pio(); that happens either because it is called too early
  863 * or because the address can't be matched to any host bridge IO space.
 864 */
 865int of_address_to_resource(struct device_node *dev, int index,
 866			   struct resource *r)
 867{
 868	const __be32	*addrp;
 869	u64		size;
 870	unsigned int	flags;
 871	const char	*name = NULL;
 872
 873	addrp = of_get_address(dev, index, &size, &flags);
 874	if (addrp == NULL)
 875		return -EINVAL;
 876
 877	/* Get optional "reg-names" property to add a name to a resource */
  878	of_property_read_string_index(dev, "reg-names", index, &name);
 879
 880	return __of_address_to_resource(dev, addrp, size, flags, name, r);
 881}
 882EXPORT_SYMBOL_GPL(of_address_to_resource);
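
/*
 * A minimal usage sketch (hypothetical platform-driver code, not part of
 * this file): translate the first "reg" entry of a node into a CPU-addressed
 * resource and report it.
 */
static int __maybe_unused example_report_first_reg(struct device_node *np)
{
	struct resource res;
	int ret;

	ret = of_address_to_resource(np, 0, &res);
	if (ret)
		return ret;	/* no "reg" entry, or translation failed */

	pr_info("%pOF: reg[0] is %pR\n", np, &res);
	return 0;
}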
 883
 884/**
 885 * of_iomap - Maps the memory mapped IO for a given device_node
 886 * @np:		the device whose io range will be mapped
 887 * @index:	index of the io range
 888 *
  889 * Returns a pointer to the mapped memory, or NULL on error.
 890 */
 891void __iomem *of_iomap(struct device_node *np, int index)
 892{
 893	struct resource res;
 894
 895	if (of_address_to_resource(np, index, &res))
 896		return NULL;
 897
 898	return ioremap(res.start, resource_size(&res));
 899}
 900EXPORT_SYMBOL(of_iomap);
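
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file),
 * assuming the usual <linux/io.h> accessors: map a node's first register
 * window, read one 32-bit register at offset 0, then unmap it again.
 */
static int __maybe_unused example_peek_first_window(struct device_node *np)
{
	void __iomem *base = of_iomap(np, 0);

	if (!base)
		return -ENOMEM;

	pr_info("%pOF: first register reads %#x\n", np, readl(base));
	iounmap(base);

	return 0;
}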
 901
  902/**
 903 * of_io_request_and_map - Requests a resource and maps the memory mapped IO
 904 *			   for a given device_node
  905 * @np:		the device whose io range will be mapped
 906 * @index:	index of the io range
 907 * @name:	name "override" for the memory region request or NULL
 908 *
 909 * Returns a pointer to the requested and mapped memory or an ERR_PTR() encoded
 910 * error code on failure. Usage example:
 911 *
 912 *	base = of_io_request_and_map(node, 0, "foo");
 913 *	if (IS_ERR(base))
 914 *		return PTR_ERR(base);
 915 */
 916void __iomem *of_io_request_and_map(struct device_node *np, int index,
 917				    const char *name)
 918{
 919	struct resource res;
 920	void __iomem *mem;
 921
 922	if (of_address_to_resource(np, index, &res))
 923		return IOMEM_ERR_PTR(-EINVAL);
 924
 925	if (!name)
 926		name = res.name;
 927	if (!request_mem_region(res.start, resource_size(&res), name))
 928		return IOMEM_ERR_PTR(-EBUSY);
 929
 930	mem = ioremap(res.start, resource_size(&res));
 931	if (!mem) {
 932		release_mem_region(res.start, resource_size(&res));
 933		return IOMEM_ERR_PTR(-ENOMEM);
 934	}
 935
 936	return mem;
 937}
 938EXPORT_SYMBOL(of_io_request_and_map);
 939
 940/**
 941 * of_dma_get_range - Get DMA range info
 942 * @np:		device node to get DMA range info
 943 * @dma_addr:	pointer to store initial DMA address of DMA range
 944 * @paddr:	pointer to store initial CPU address of DMA range
 945 * @size:	pointer to store size of DMA range
 946 *
  947 * Look in the bottom-up direction for the first "dma-ranges" property
  948 * and parse it.
 949 *  dma-ranges format:
 950 *	DMA addr (dma_addr)	: naddr cells
 951 *	CPU addr (phys_addr_t)	: pna cells
 952 *	size			: nsize cells
 953 *
  954 * It returns -ENODEV if no "dma-ranges" property was found
  955 * for this device in the DT.
 956 */
 957int of_dma_get_range(struct device_node *np, u64 *dma_addr, u64 *paddr, u64 *size)
 958{
 959	struct device_node *node = of_node_get(np);
 960	const __be32 *ranges = NULL;
 961	int len;
 962	int ret = 0;
 963	bool found_dma_ranges = false;
 964	struct of_range_parser parser;
 965	struct of_range range;
 966	u64 dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
 967
 968	while (node) {
 969		ranges = of_get_property(node, "dma-ranges", &len);
 970
 971		/* Ignore empty ranges, they imply no translation required */
 972		if (ranges && len > 0)
 973			break;
 974
 975		/* Once we find 'dma-ranges', then a missing one is an error */
 976		if (found_dma_ranges && !ranges) {
 977			ret = -ENODEV;
 978			goto out;
 979		}
 980		found_dma_ranges = true;
 981
 982		node = of_get_next_dma_parent(node);
 983	}
 984
 985	if (!node || !ranges) {
 986		pr_debug("no dma-ranges found for node(%pOF)\n", np);
 987		ret = -ENODEV;
 988		goto out;
 989	}
 990
 991	of_dma_range_parser_init(&parser, node);
 992
 993	for_each_of_range(&parser, &range) {
 994		pr_debug("dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
 995			 range.bus_addr, range.cpu_addr, range.size);
 996
 997		if (dma_offset && range.cpu_addr - range.bus_addr != dma_offset) {
 998			pr_warn("Can't handle multiple dma-ranges with different offsets on node(%pOF)\n", node);
 999			/* Don't error out as we'd break some existing DTs */
1000			continue;
1001		}
1002		if (range.cpu_addr == OF_BAD_ADDR) {
1003			pr_err("translation of DMA address(%llx) to CPU address failed node(%pOF)\n",
1004			       range.bus_addr, node);
1005			continue;
1006		}
1007		dma_offset = range.cpu_addr - range.bus_addr;
1008
1009		/* Take lower and upper limits */
1010		if (range.bus_addr < dma_start)
1011			dma_start = range.bus_addr;
1012		if (range.bus_addr + range.size > dma_end)
1013			dma_end = range.bus_addr + range.size;
1014	}
1015
1016	if (dma_start >= dma_end) {
1017		ret = -EINVAL;
1018		pr_debug("Invalid DMA ranges configuration on node(%pOF)\n",
1019			 node);
1020		goto out;
1021	}
1022
1023	*dma_addr = dma_start;
1024	*size = dma_end - dma_start;
1025	*paddr = dma_start + dma_offset;
1026
1027	pr_debug("final: dma_addr(%llx) cpu_addr(%llx) size(%llx)\n",
1028		 *dma_addr, *paddr, *size);
1029
1030out:
1031	of_node_put(node);
1032
1033	return ret;
1034}
1035
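/*
 * A worked example (hypothetical device-tree fragment, not from this file),
 * assuming one address cell and one size cell on both bus levels:
 *
 *	parent-bus {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		dma-ranges = <0x00000000 0x40000000 0x20000000>;
 *	};
 *
 * For a device below this bus, of_dma_get_range() would find the property
 * while walking up the tree and return *dma_addr = 0x0,
 * *paddr = 0x40000000 and *size = 0x20000000, i.e. a constant offset of
 * 0x40000000 between bus and CPU addresses.
 */
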
1036/**
1037 * of_dma_is_coherent - Check if device is coherent
1038 * @np:	device node
1039 *
 1040 * It returns true if the "dma-coherent" property was found
 1041 * for this device in the DT, or if DMA is coherent by
 1042 * default for OF devices on the current platform.
1043 */
1044bool of_dma_is_coherent(struct device_node *np)
1045{
 1046	struct device_node *node;
 1047
 1048	if (IS_ENABLED(CONFIG_OF_DMA_DEFAULT_COHERENT))
 1049		return true;
 1050	node = of_node_get(np);
 1051	while (node) {
1052		if (of_property_read_bool(node, "dma-coherent")) {
1053			of_node_put(node);
1054			return true;
1055		}
1056		node = of_get_next_dma_parent(node);
1057	}
1058	of_node_put(node);
1059	return false;
1060}
1061EXPORT_SYMBOL_GPL(of_dma_is_coherent);
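
/*
 * A minimal usage sketch (hypothetical driver code, not part of this file):
 * report whether a device's bus hierarchy advertises cache-coherent DMA.
 */
static void __maybe_unused example_report_coherency(struct device *dev)
{
	if (of_dma_is_coherent(dev->of_node))
		dev_info(dev, "DMA is cache-coherent\n");
	else
		dev_info(dev, "DMA needs explicit cache maintenance\n");
}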