// SPDX-License-Identifier: GPL-2.0+
/*
 * PCI <-> OF mapping helpers
 *
 * Copyright 2011 IBM Corp.
 */
#define pr_fmt(fmt)	"PCI: OF: " fmt

#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include "pci.h"

#ifdef CONFIG_PCI
/**
 * pci_set_of_node - Find and set device's DT device_node
 * @dev: the PCI device structure to fill
 *
 * Returns 0 on success with of_node set or when no device is described in the
 * DT. Returns -ENODEV if the device is present, but disabled in the DT.
 */
int pci_set_of_node(struct pci_dev *dev)
{
	struct device_node *node;

	if (!dev->bus->dev.of_node)
		return 0;

	node = of_pci_find_child_device(dev->bus->dev.of_node, dev->devfn);
	if (!node)
		return 0;

	device_set_node(&dev->dev, of_fwnode_handle(node));
	return 0;
}

void pci_release_of_node(struct pci_dev *dev)
{
	of_node_put(dev->dev.of_node);
	device_set_node(&dev->dev, NULL);
}

void pci_set_bus_of_node(struct pci_bus *bus)
{
	struct device_node *node;

	if (bus->self == NULL) {
		node = pcibios_get_phb_of_node(bus);
	} else {
		node = of_node_get(bus->self->dev.of_node);
		if (node && of_property_read_bool(node, "external-facing"))
			bus->self->external_facing = true;
	}

	device_set_node(&bus->dev, of_fwnode_handle(node));
}

void pci_release_bus_of_node(struct pci_bus *bus)
{
	of_node_put(bus->dev.of_node);
	device_set_node(&bus->dev, NULL);
}

struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus)
{
	/* This should only be called for PHBs */
	if (WARN_ON(bus->self || bus->parent))
		return NULL;

	/*
	 * Look for a node pointer in either the intermediary device we
	 * create above the root bus or its own parent. Normally only
	 * the latter is populated.
	 */
	if (bus->bridge->of_node)
		return of_node_get(bus->bridge->of_node);
	if (bus->bridge->parent && bus->bridge->parent->of_node)
		return of_node_get(bus->bridge->parent->of_node);
	return NULL;
}

struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus)
{
#ifdef CONFIG_IRQ_DOMAIN
	struct irq_domain *d;

	if (!bus->dev.of_node)
		return NULL;

	/* Start looking for a phandle to an MSI controller. */
	d = of_msi_get_domain(&bus->dev, bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	/*
	 * If we don't have an msi-parent property, look for a domain
	 * directly attached to the host bridge.
	 */
	d = irq_find_matching_host(bus->dev.of_node, DOMAIN_BUS_PCI_MSI);
	if (d)
		return d;

	return irq_find_host(bus->dev.of_node);
#else
	return NULL;
#endif
}

bool pci_host_of_has_msi_map(struct device *dev)
{
	if (dev && dev->of_node)
		return of_get_property(dev->of_node, "msi-map", NULL);
	return false;
}
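
/*
 * Illustrative host bridge fragment (not from this file) of the kind that
 * makes pci_host_of_has_msi_map() return true; the &its phandle label is
 * only an example:
 *
 *	msi-map = <0x0 &its 0x0 0x10000>;
 *
 * maps PCI requester IDs 0x0000-0xffff to device IDs of the referenced
 * MSI controller.
 */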

static inline int __of_pci_pci_compare(struct device_node *node,
				       unsigned int data)
{
	int devfn;

	devfn = of_pci_get_devfn(node);
	if (devfn < 0)
		return 0;

	return devfn == data;
}

struct device_node *of_pci_find_child_device(struct device_node *parent,
					     unsigned int devfn)
{
	struct device_node *node, *node2;

	for_each_child_of_node(parent, node) {
		if (__of_pci_pci_compare(node, devfn))
			return node;
		/*
		 * Some OFs create a parent node "multifunc-device" as
		 * a fake root for all functions of a multi-function
		 * device; we go down into those as well.
		 */
		if (of_node_name_eq(node, "multifunc-device")) {
			for_each_child_of_node(node, node2) {
				if (__of_pci_pci_compare(node2, devfn)) {
					of_node_put(node);
					return node2;
				}
			}
		}
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(of_pci_find_child_device);

/**
 * of_pci_get_devfn() - Get device and function numbers for a device node
 * @np: device node
 *
 * Parses a standard 5-cell PCI resource and returns an 8-bit value that can
 * be passed to the PCI_SLOT() and PCI_FUNC() macros to extract the device
 * and function numbers respectively. On error a negative error code is
 * returned.
 */
int of_pci_get_devfn(struct device_node *np)
{
	u32 reg[5];
	int error;

	error = of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg));
	if (error)
		return error;

	return (reg[0] >> 8) & 0xff;
}
EXPORT_SYMBOL_GPL(of_pci_get_devfn);
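
/*
 * Illustrative DT fragment (not part of this file): for a child node of the
 * host bridge such as
 *
 *	dev@2,1 {
 *		reg = <0x00001100 0 0 0 0>;
 *	};
 *
 * the first "reg" cell encodes bus[23:16], device[15:11] and function[10:8],
 * so of_pci_get_devfn() returns 0x11, and PCI_SLOT(0x11) == 2,
 * PCI_FUNC(0x11) == 1.
 */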

/**
 * of_pci_parse_bus_range() - parse the bus-range property of a PCI device
 * @node: device node
 * @res: address to a struct resource to return the bus-range
 *
 * Returns 0 on success or a negative error-code on failure.
 */
int of_pci_parse_bus_range(struct device_node *node, struct resource *res)
{
	u32 bus_range[2];
	int error;

	error = of_property_read_u32_array(node, "bus-range", bus_range,
					   ARRAY_SIZE(bus_range));
	if (error)
		return error;

	res->name = node->name;
	res->start = bus_range[0];
	res->end = bus_range[1];
	res->flags = IORESOURCE_BUS;

	return 0;
}
EXPORT_SYMBOL_GPL(of_pci_parse_bus_range);
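
/*
 * Illustrative example (not part of this file): a host bridge node carrying
 *
 *	bus-range = <0x00 0xff>;
 *
 * yields a resource spanning buses 0x00-0xff with IORESOURCE_BUS set, which
 * devm_of_pci_get_host_bridge_resources() below adds to the window list.
 */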

/**
 * of_get_pci_domain_nr - Find the host bridge domain number
 *			  of the given device node.
 * @node: Device tree node with the domain information.
 *
 * This function will try to obtain the host bridge domain number by finding
 * a property called "linux,pci-domain" of the given device node.
 *
 * Return:
 * * >= 0	- On success, the associated domain number.
 * * -EINVAL	- The property "linux,pci-domain" does not exist.
 * * -ENODATA	- The "linux,pci-domain" property does not have a value.
 * * -EOVERFLOW	- Invalid "linux,pci-domain" property value.
 *
 * Returns the associated domain number from DT in the range [0-0xffff], or
 * a negative value if the required property is not found.
 */
int of_get_pci_domain_nr(struct device_node *node)
{
	u32 domain;
	int error;

	error = of_property_read_u32(node, "linux,pci-domain", &domain);
	if (error)
		return error;

	return (u16)domain;
}
EXPORT_SYMBOL_GPL(of_get_pci_domain_nr);
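
/*
 * Illustrative example (not part of this file): a host bridge node with
 *
 *	linux,pci-domain = <1>;
 *
 * makes of_get_pci_domain_nr() return 1; without the property it returns
 * -EINVAL.
 */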

/**
 * of_pci_check_probe_only - Setup probe only mode if linux,pci-probe-only
 *			     is present and valid
 */
void of_pci_check_probe_only(void)
{
	u32 val;
	int ret;

	ret = of_property_read_u32(of_chosen, "linux,pci-probe-only", &val);
	if (ret) {
		if (ret == -ENODATA || ret == -EOVERFLOW)
			pr_warn("linux,pci-probe-only without valid value, ignoring\n");
		return;
	}

	if (val)
		pci_add_flags(PCI_PROBE_ONLY);
	else
		pci_clear_flags(PCI_PROBE_ONLY);

	pr_info("PROBE_ONLY %s\n", val ? "enabled" : "disabled");
}
EXPORT_SYMBOL_GPL(of_pci_check_probe_only);
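
/*
 * Illustrative /chosen fragment (not part of this file):
 *
 *	chosen {
 *		linux,pci-probe-only = <1>;
 *	};
 *
 * sets PCI_PROBE_ONLY, so the kernel keeps the firmware's resource
 * assignments instead of reassigning BARs; a value of 0 clears the flag.
 */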

/**
 * devm_of_pci_get_host_bridge_resources() - Resource-managed parsing of PCI
 *					      host bridge resources from DT
 * @dev: host bridge device
 * @busno: bus number associated with the bridge root bus
 * @bus_max: maximum number of buses for this bridge
 * @resources: list where the range of resources will be added after DT parsing
 * @ib_resources: list where the range of inbound resources (with addresses
 * from 'dma-ranges') will be added after DT parsing
 * @io_base: pointer to a variable that will contain on return the physical
 * address for the start of the I/O range. Can be NULL if the caller doesn't
 * expect I/O ranges to be present in the device tree.
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping based on its content. It is expected
 * that the property conforms with the Power ePAPR document.
 *
 * It returns zero if the range parsing has been successful or a standard error
 * value if it failed.
 */
static int devm_of_pci_get_host_bridge_resources(struct device *dev,
			unsigned char busno, unsigned char bus_max,
			struct list_head *resources,
			struct list_head *ib_resources,
			resource_size_t *io_base)
{
	struct device_node *dev_node = dev->of_node;
	struct resource *res, tmp_res;
	struct resource *bus_range;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	const char *range_type;
	int err;

	if (io_base)
		*io_base = (resource_size_t)OF_BAD_ADDR;

	bus_range = devm_kzalloc(dev, sizeof(*bus_range), GFP_KERNEL);
	if (!bus_range)
		return -ENOMEM;

	dev_info(dev, "host bridge %pOF ranges:\n", dev_node);

	err = of_pci_parse_bus_range(dev_node, bus_range);
	if (err) {
		bus_range->start = busno;
		bus_range->end = bus_max;
		bus_range->flags = IORESOURCE_BUS;
		dev_info(dev, "  No bus range found for %pOF, using %pR\n",
			 dev_node, bus_range);
	} else {
		if (bus_range->end > bus_range->start + bus_max)
			bus_range->end = bus_range->start + bus_max;
	}
	pci_add_resource(resources, bus_range);

	/* Check for ranges property */
	err = of_pci_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_IO)
			range_type = "IO";
		else if ((range.flags & IORESOURCE_TYPE_BITS) == IORESOURCE_MEM)
			range_type = "MEM";
		else
			range_type = "err";
		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 range_type, range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		if (resource_type(res) == IORESOURCE_IO) {
			if (!io_base) {
				dev_err(dev, "I/O range found for %pOF. Please provide an io_base pointer to save CPU base address\n",
					dev_node);
				err = -EINVAL;
				goto failed;
			}
			if (*io_base != (resource_size_t)OF_BAD_ADDR)
				dev_warn(dev, "More than one I/O resource converted for %pOF. CPU base address for old range lost!\n",
					 dev_node);
			*io_base = range.cpu_addr;
		} else if (resource_type(res) == IORESOURCE_MEM) {
			res->flags &= ~IORESOURCE_MEM_64;
		}

		pci_add_resource_offset(resources, res, res->start - range.pci_addr);
	}

	/* Check for dma-ranges property */
	if (!ib_resources)
		return 0;
	err = of_pci_dma_range_parser_init(&parser, dev_node);
	if (err)
		return 0;

	dev_dbg(dev, "Parsing dma-ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/*
		 * If we failed translation or got a zero-sized region
		 * then skip this range
		 */
		if (((range.flags & IORESOURCE_TYPE_BITS) != IORESOURCE_MEM) ||
		    range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		dev_info(dev, "  %6s %#012llx..%#012llx -> %#012llx\n",
			 "IB MEM", range.cpu_addr,
			 range.cpu_addr + range.size - 1, range.pci_addr);

		err = of_pci_range_to_resource(&range, dev_node, &tmp_res);
		if (err)
			continue;

		res = devm_kmemdup(dev, &tmp_res, sizeof(tmp_res), GFP_KERNEL);
		if (!res) {
			err = -ENOMEM;
			goto failed;
		}

		pci_add_resource_offset(ib_resources, res,
					res->start - range.pci_addr);
	}

	return 0;

failed:
	pci_free_resource_list(resources);
	return err;
}
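
/*
 * Illustrative host bridge fragment (not part of this file) of the kind the
 * parser above consumes; the cell counts assume #address-cells = <3> in the
 * PCI node and <2>/<2> address/size cells in the parent:
 *
 *	ranges = <0x01000000 0x0 0x00000000  0x0 0x3eff0000  0x0 0x00010000>,
 *		 <0x02000000 0x0 0x10000000  0x0 0x10000000  0x0 0x2eff0000>;
 *	dma-ranges = <0x02000000 0x0 0x80000000  0x0 0x80000000  0x0 0x80000000>;
 *
 * The first entry is a 64 KiB I/O window, the second a 32-bit MEM window,
 * and dma-ranges describes an inbound window added to @ib_resources.
 */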

#if IS_ENABLED(CONFIG_OF_IRQ)
/**
 * of_irq_parse_pci - Resolve the interrupt for a PCI device
 * @pdev: the device whose interrupt is to be resolved
 * @out_irq: structure of_phandle_args filled by this function
 *
 * This function resolves the PCI interrupt for a given PCI device. If a
 * device-node exists for a given pci_dev, it will use normal OF tree
 * walking. If not, it will implement standard swizzling and walk up the
 * PCI tree until a device node is found, at which point it will finish
 * resolving using the OF tree walking.
 */
static int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq)
{
	struct device_node *dn, *ppnode = NULL;
	struct pci_dev *ppdev;
	__be32 laddr[3];
	u8 pin;
	int rc;

	/*
	 * Check if we have a device node; if yes, fall back to standard
	 * device tree parsing.
	 */
	dn = pci_device_to_OF_node(pdev);
	if (dn) {
		rc = of_irq_parse_one(dn, 0, out_irq);
		if (!rc)
			return rc;
	}

	/*
	 * OK, we don't have one; time to have fun. Let's start by building up
	 * an interrupt spec. We assume #interrupt-cells is 1, which is
	 * standard for PCI. If yours is different, don't use this routine.
	 */
	rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin);
	if (rc != 0)
		goto err;
	/* No pin, exit with no error message. */
	if (pin == 0)
		return -ENODEV;

	/* Local interrupt-map in the device node? Use it! */
	if (of_property_present(dn, "interrupt-map")) {
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		ppnode = dn;
	}

	/* Now we walk up the PCI tree */
	while (!ppnode) {
		/* Get the pci_dev of our parent */
		ppdev = pdev->bus->self;

		/* Ouch, it's a host bridge... */
		if (ppdev == NULL) {
			ppnode = pci_bus_to_OF_node(pdev->bus);

			/* No node for host bridge ? give up */
			if (ppnode == NULL) {
				rc = -EINVAL;
				goto err;
			}
		} else {
			/* We found a P2P bridge, check if it has a node */
			ppnode = pci_device_to_OF_node(ppdev);
		}

		/*
		 * Ok, we have found a parent with a device-node, hand over to
		 * the OF parsing code.
		 * We build a unit address from the linux device to be used for
		 * resolution. Note that we use the linux bus number which may
		 * not match your firmware bus numbering.
		 * Fortunately, in most cases, interrupt-map-mask doesn't
		 * include the bus number as part of the matching.
		 * You should still be careful about that though if you intend
		 * to rely on this function (you ship a firmware that doesn't
		 * create device nodes for all PCI devices).
		 */
		if (ppnode)
			break;

		/*
		 * We can only get here if we hit a P2P bridge with no node;
		 * let's do standard swizzling and try again
		 */
		pin = pci_swizzle_interrupt_pin(pdev, pin);
		pdev = ppdev;
	}

	out_irq->np = ppnode;
	out_irq->args_count = 1;
	out_irq->args[0] = pin;
	laddr[0] = cpu_to_be32((pdev->bus->number << 16) | (pdev->devfn << 8));
	laddr[1] = laddr[2] = cpu_to_be32(0);
	rc = of_irq_parse_raw(laddr, out_irq);
	if (rc)
		goto err;
	return 0;
err:
	if (rc == -ENOENT) {
		dev_warn(&pdev->dev,
			"%s: no interrupt-map found, INTx interrupts not available\n",
			__func__);
		pr_warn_once("%s: possibly some PCI slots don't have level triggered interrupts capability\n",
			__func__);
	} else {
		dev_err(&pdev->dev, "%s: failed with rc=%d\n", __func__, rc);
	}
	return rc;
}
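
/*
 * Illustrative host bridge fragment (not part of this file) of the kind
 * of_irq_parse_raw() resolves against; the GIC phandle and cell values are
 * only an example:
 *
 *	#interrupt-cells = <1>;
 *	interrupt-map-mask = <0 0 0 7>;
 *	interrupt-map = <0 0 0 1 &gic 0 108 4>,
 *			<0 0 0 2 &gic 0 109 4>,
 *			<0 0 0 3 &gic 0 110 4>,
 *			<0 0 0 4 &gic 0 111 4>;
 *
 * With the mask above only the INTx pin is matched, so the Linux-assigned
 * bus number placed in laddr[0] does not affect the lookup.
 */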

/**
 * of_irq_parse_and_map_pci() - Decode a PCI IRQ from the device tree and map to a VIRQ
 * @dev: The PCI device needing an IRQ
 * @slot: PCI slot number; passed when used as map_irq callback. Unused
 * @pin: PCI IRQ pin number; passed when used as map_irq callback. Unused
 *
 * @slot and @pin are unused, but included in the function so that this
 * function can be used directly as the map_irq callback to
 * pci_assign_irq() and struct pci_host_bridge.map_irq pointer
 */
int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin)
{
	struct of_phandle_args oirq;
	int ret;

	ret = of_irq_parse_pci(dev, &oirq);
	if (ret)
		return 0; /* Proper return code 0 == NO_IRQ */

	return irq_create_of_mapping(&oirq);
}
EXPORT_SYMBOL_GPL(of_irq_parse_and_map_pci);
#endif /* CONFIG_OF_IRQ */

static int pci_parse_request_of_pci_ranges(struct device *dev,
					   struct pci_host_bridge *bridge)
{
	int err, res_valid = 0;
	resource_size_t iobase;
	struct resource_entry *win, *tmp;

	INIT_LIST_HEAD(&bridge->windows);
	INIT_LIST_HEAD(&bridge->dma_ranges);

	err = devm_of_pci_get_host_bridge_resources(dev, 0, 0xff, &bridge->windows,
						    &bridge->dma_ranges, &iobase);
	if (err)
		return err;

	err = devm_request_pci_bus_resources(dev, &bridge->windows);
	if (err)
		return err;

	resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
		struct resource *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			err = devm_pci_remap_iospace(dev, res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					 err, res);
				resource_list_destroy_entry(win);
			}
			break;
		case IORESOURCE_MEM:
			res_valid |= !(res->flags & IORESOURCE_PREFETCH);

			if (!(res->flags & IORESOURCE_PREFETCH))
				if (upper_32_bits(resource_size(res)))
					dev_warn(dev, "Memory resource size exceeds max for 32 bits\n");

			break;
		}
	}

	if (!res_valid)
		dev_warn(dev, "non-prefetchable memory resource required\n");

	return 0;
}

int devm_of_pci_bridge_init(struct device *dev, struct pci_host_bridge *bridge)
{
	if (!dev->of_node)
		return 0;

	bridge->swizzle_irq = pci_common_swizzle;
	bridge->map_irq = of_irq_parse_and_map_pci;

	return pci_parse_request_of_pci_ranges(dev, bridge);
}

#ifdef CONFIG_PCI_DYNAMIC_OF_NODES

void of_pci_remove_node(struct pci_dev *pdev)
{
	struct device_node *np;

	np = pci_device_to_OF_node(pdev);
	if (!np || !of_node_check_flag(np, OF_DYNAMIC))
		return;
	pdev->dev.of_node = NULL;

	of_changeset_revert(np->data);
	of_changeset_destroy(np->data);
	of_node_put(np);
}

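/*
 * Create a dynamic OF node for the device. The node is named after its unit
 * address, "pci@<slot>,<func>" for bridges and "dev@<slot>,<func>" otherwise
 * (e.g. "dev@2,1" for devfn 0x11), and the changeset used to build it is
 * kept in np->data so that of_pci_remove_node() can revert it later.
 */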
void of_pci_make_dev_node(struct pci_dev *pdev)
{
	struct device_node *ppnode, *np = NULL;
	const char *pci_type;
	struct of_changeset *cset;
	const char *name;
	int ret;

	/*
	 * If there is already a device tree node linked to this device,
	 * return immediately.
	 */
	if (pci_device_to_OF_node(pdev))
		return;

	/* Check if there is a device tree node for the parent device */
	if (!pdev->bus->self)
		ppnode = pdev->bus->dev.of_node;
	else
		ppnode = pdev->bus->self->dev.of_node;
	if (!ppnode)
		return;

	if (pci_is_bridge(pdev))
		pci_type = "pci";
	else
		pci_type = "dev";

	name = kasprintf(GFP_KERNEL, "%s@%x,%x", pci_type,
			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	if (!name)
		return;

	cset = kmalloc(sizeof(*cset), GFP_KERNEL);
	if (!cset)
		goto out_free_name;
	of_changeset_init(cset);

	np = of_changeset_create_node(cset, ppnode, name);
	if (!np)
		goto out_destroy_cset;

	ret = of_pci_add_properties(pdev, cset, np);
	if (ret)
		goto out_free_node;

	ret = of_changeset_apply(cset);
	if (ret)
		goto out_free_node;

	np->data = cset;
	pdev->dev.of_node = np;
	kfree(name);

	return;

out_free_node:
	of_node_put(np);
out_destroy_cset:
	of_changeset_destroy(cset);
	kfree(cset);
out_free_name:
	kfree(name);
}
#endif

#endif /* CONFIG_PCI */

/**
 * of_pci_get_max_link_speed - Find the maximum link speed of the given device node.
 * @node: Device tree node with the maximum link speed information.
 *
 * This function will try to find the limitation of link speed by finding
 * a property called "max-link-speed" of the given device node.
 *
 * Return:
 * * > 0	- On success, a maximum link speed.
 * * -EINVAL	- Invalid "max-link-speed" property value, or failure to access
 *		  the property of the device tree node.
 *
 * Returns the associated max link speed from DT, or a negative value if the
 * required property is not found or is invalid.
 */
int of_pci_get_max_link_speed(struct device_node *node)
{
	u32 max_link_speed;

	if (of_property_read_u32(node, "max-link-speed", &max_link_speed) ||
	    max_link_speed == 0 || max_link_speed > 4)
		return -EINVAL;

	return max_link_speed;
}
EXPORT_SYMBOL_GPL(of_pci_get_max_link_speed);
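
/*
 * Illustrative example (not part of this file): a controller node with
 *
 *	max-link-speed = <2>;
 *
 * caps the link at PCIe Gen2; a missing property, a value of 0, or a value
 * above 4 yields -EINVAL, and callers typically fall back to their own
 * default.
 */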

/**
 * of_pci_get_slot_power_limit - Parses the "slot-power-limit-milliwatt"
 *				 property.
 *
 * @node: device tree node with the slot power limit information
 * @slot_power_limit_value: pointer where the value should be stored in PCIe
 *			    Slot Capabilities Register format
 * @slot_power_limit_scale: pointer where the scale should be stored in PCIe
 *			    Slot Capabilities Register format
 *
 * Returns the slot power limit in milliwatts and if @slot_power_limit_value
 * and @slot_power_limit_scale pointers are non-NULL, fills in the value and
 * scale in format used by PCIe Slot Capabilities Register.
 *
 * If the property is not found or is invalid, returns 0.
 */
u32 of_pci_get_slot_power_limit(struct device_node *node,
				u8 *slot_power_limit_value,
				u8 *slot_power_limit_scale)
{
	u32 slot_power_limit_mw;
	u8 value, scale;

	if (of_property_read_u32(node, "slot-power-limit-milliwatt",
				 &slot_power_limit_mw))
		slot_power_limit_mw = 0;

	/* Calculate Slot Power Limit Value and Slot Power Limit Scale */
	if (slot_power_limit_mw == 0) {
		value = 0x00;
		scale = 0;
	} else if (slot_power_limit_mw <= 255) {
		value = slot_power_limit_mw;
		scale = 3;
	} else if (slot_power_limit_mw <= 255*10) {
		value = slot_power_limit_mw / 10;
		scale = 2;
		slot_power_limit_mw = slot_power_limit_mw / 10 * 10;
	} else if (slot_power_limit_mw <= 255*100) {
		value = slot_power_limit_mw / 100;
		scale = 1;
		slot_power_limit_mw = slot_power_limit_mw / 100 * 100;
	} else if (slot_power_limit_mw <= 239*1000) {
		value = slot_power_limit_mw / 1000;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / 1000 * 1000;
	} else if (slot_power_limit_mw < 250*1000) {
		value = 0xEF;
		scale = 0;
		slot_power_limit_mw = 239*1000;
	} else if (slot_power_limit_mw <= 600*1000) {
		value = 0xF0 + (slot_power_limit_mw / 1000 - 250) / 25;
		scale = 0;
		slot_power_limit_mw = slot_power_limit_mw / (1000*25) * (1000*25);
	} else {
		value = 0xFE;
		scale = 0;
		slot_power_limit_mw = 600*1000;
	}

	if (slot_power_limit_value)
		*slot_power_limit_value = value;

	if (slot_power_limit_scale)
		*slot_power_limit_scale = scale;

	return slot_power_limit_mw;
}
EXPORT_SYMBOL_GPL(of_pci_get_slot_power_limit);
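
/*
 * Illustrative example (not part of this file): with
 *
 *	slot-power-limit-milliwatt = <75000>;
 *
 * the function returns 75000 and, if the pointers are supplied, stores a
 * Slot Power Limit Value of 75 (0x4B) and a Scale of 0 (1.0x watts),
 * matching a 75 W slot.
 */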