v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/* pci.c: UltraSparc PCI controller support.
   3 *
   4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
   5 * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
   6 * Copyright (C) 1999 Jakub Jelinek   (jj@ultra.linux.cz)
   7 *
   8 * OF tree based PCI bus probing taken from the PowerPC port
   9 * with minor modifications, see there for credits.
  10 */
  11
  12#include <linux/export.h>
  13#include <linux/kernel.h>
  14#include <linux/string.h>
  15#include <linux/sched.h>
  16#include <linux/capability.h>
  17#include <linux/errno.h>
  18#include <linux/pci.h>
  19#include <linux/msi.h>
  20#include <linux/irq.h>
  21#include <linux/init.h>
  22#include <linux/of.h>
  23#include <linux/of_device.h>
  24
  25#include <linux/uaccess.h>
  26#include <asm/pgtable.h>
  27#include <asm/irq.h>
  28#include <asm/prom.h>
  29#include <asm/apb.h>
  30
  31#include "pci_impl.h"
  32#include "kernel.h"
  33
  34/* List of all PCI controllers found in the system. */
  35struct pci_pbm_info *pci_pbm_root = NULL;
  36
  37/* Each PBM found gets a unique index. */
  38int pci_num_pbms = 0;
  39
  40volatile int pci_poke_in_progress;
  41volatile int pci_poke_cpu = -1;
  42volatile int pci_poke_faulted;
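/* Editor's note (descriptive, not in the original file): these volatile
 * flags appear to implement the config-space "poke" protocol.  Before each
 * raw access the poking CPU records itself in pci_poke_cpu and raises
 * pci_poke_in_progress; a faulting access is then flagged via
 * pci_poke_faulted by the trap path rather than being treated as fatal,
 * so the readers below can simply discard the result when the access
 * aborted.
 */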
  43
  44static DEFINE_SPINLOCK(pci_poke_lock);
  45
  46void pci_config_read8(u8 *addr, u8 *ret)
  47{
  48	unsigned long flags;
  49	u8 byte;
  50
  51	spin_lock_irqsave(&pci_poke_lock, flags);
  52	pci_poke_cpu = smp_processor_id();
  53	pci_poke_in_progress = 1;
  54	pci_poke_faulted = 0;
  55	__asm__ __volatile__("membar #Sync\n\t"
  56			     "lduba [%1] %2, %0\n\t"
  57			     "membar #Sync"
  58			     : "=r" (byte)
  59			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
  60			     : "memory");
  61	pci_poke_in_progress = 0;
  62	pci_poke_cpu = -1;
  63	if (!pci_poke_faulted)
  64		*ret = byte;
  65	spin_unlock_irqrestore(&pci_poke_lock, flags);
  66}
  67
  68void pci_config_read16(u16 *addr, u16 *ret)
  69{
  70	unsigned long flags;
  71	u16 word;
  72
  73	spin_lock_irqsave(&pci_poke_lock, flags);
  74	pci_poke_cpu = smp_processor_id();
  75	pci_poke_in_progress = 1;
  76	pci_poke_faulted = 0;
  77	__asm__ __volatile__("membar #Sync\n\t"
  78			     "lduha [%1] %2, %0\n\t"
  79			     "membar #Sync"
  80			     : "=r" (word)
  81			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
  82			     : "memory");
  83	pci_poke_in_progress = 0;
  84	pci_poke_cpu = -1;
  85	if (!pci_poke_faulted)
  86		*ret = word;
  87	spin_unlock_irqrestore(&pci_poke_lock, flags);
  88}
  89
  90void pci_config_read32(u32 *addr, u32 *ret)
  91{
  92	unsigned long flags;
  93	u32 dword;
  94
  95	spin_lock_irqsave(&pci_poke_lock, flags);
  96	pci_poke_cpu = smp_processor_id();
  97	pci_poke_in_progress = 1;
  98	pci_poke_faulted = 0;
  99	__asm__ __volatile__("membar #Sync\n\t"
 100			     "lduwa [%1] %2, %0\n\t"
 101			     "membar #Sync"
 102			     : "=r" (dword)
 103			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 104			     : "memory");
 105	pci_poke_in_progress = 0;
 106	pci_poke_cpu = -1;
 107	if (!pci_poke_faulted)
 108		*ret = dword;
 109	spin_unlock_irqrestore(&pci_poke_lock, flags);
 110}
 111
 112void pci_config_write8(u8 *addr, u8 val)
 113{
 114	unsigned long flags;
 115
 116	spin_lock_irqsave(&pci_poke_lock, flags);
 117	pci_poke_cpu = smp_processor_id();
 118	pci_poke_in_progress = 1;
 119	pci_poke_faulted = 0;
 120	__asm__ __volatile__("membar #Sync\n\t"
 121			     "stba %0, [%1] %2\n\t"
 122			     "membar #Sync"
 123			     : /* no outputs */
 124			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 125			     : "memory");
 126	pci_poke_in_progress = 0;
 127	pci_poke_cpu = -1;
 128	spin_unlock_irqrestore(&pci_poke_lock, flags);
 129}
 130
 131void pci_config_write16(u16 *addr, u16 val)
 132{
 133	unsigned long flags;
 134
 135	spin_lock_irqsave(&pci_poke_lock, flags);
 136	pci_poke_cpu = smp_processor_id();
 137	pci_poke_in_progress = 1;
 138	pci_poke_faulted = 0;
 139	__asm__ __volatile__("membar #Sync\n\t"
 140			     "stha %0, [%1] %2\n\t"
 141			     "membar #Sync"
 142			     : /* no outputs */
 143			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 144			     : "memory");
 145	pci_poke_in_progress = 0;
 146	pci_poke_cpu = -1;
 147	spin_unlock_irqrestore(&pci_poke_lock, flags);
 148}
 149
 150void pci_config_write32(u32 *addr, u32 val)
 151{
 152	unsigned long flags;
 153
 154	spin_lock_irqsave(&pci_poke_lock, flags);
 155	pci_poke_cpu = smp_processor_id();
 156	pci_poke_in_progress = 1;
 157	pci_poke_faulted = 0;
 158	__asm__ __volatile__("membar #Sync\n\t"
 159			     "stwa %0, [%1] %2\n\t"
 160			     "membar #Sync"
 161			     : /* no outputs */
 162			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 163			     : "memory");
 164	pci_poke_in_progress = 0;
 165	pci_poke_cpu = -1;
 166	spin_unlock_irqrestore(&pci_poke_lock, flags);
 167}
 168
 169static int ofpci_verbose;
 170
 171static int __init ofpci_debug(char *str)
 172{
 173	int val = 0;
 174
 175	get_option(&str, &val);
 176	if (val)
 177		ofpci_verbose = 1;
 178	return 1;
 179}
 180
 181__setup("ofpci_debug=", ofpci_debug);
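/* Editor's example (illustrative, not part of the kernel source): verbose
 * OF/PCI probing output is enabled from the kernel command line, e.g.:
 *
 *	ofpci_debug=1
 *
 * get_option() parses the integer argument; any non-zero value sets
 * ofpci_verbose.
 */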
 182
 183static unsigned long pci_parse_of_flags(u32 addr0)
 184{
 185	unsigned long flags = 0;
 186
 187	if (addr0 & 0x02000000) {
 188		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
 189		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
 190		if (addr0 & 0x01000000)
 191			flags |= IORESOURCE_MEM_64
 192				 | PCI_BASE_ADDRESS_MEM_TYPE_64;
 193		if (addr0 & 0x40000000)
 194			flags |= IORESOURCE_PREFETCH
 195				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
 196	} else if (addr0 & 0x01000000)
 197		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
 198	return flags;
 199}
 200
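/* Editor's example (illustrative, not part of the kernel source): the
 * first OF address cell (phys.hi) is decoded by pci_parse_of_flags().
 * A cell of 0x82000010 has the 32-bit memory-space bit (0x02000000) set
 * and a register offset of 0x10, i.e. a non-prefetchable MEM BAR at
 * PCI_BASE_ADDRESS_0:
 *
 *	unsigned long f = pci_parse_of_flags(0x82000010);
 *	// f == (IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY)
 *
 * With 0xc2000018 the prefetchable bit (0x40000000) is also set, so the
 * result additionally carries IORESOURCE_PREFETCH |
 * PCI_BASE_ADDRESS_MEM_PREFETCH for the BAR at offset 0x18.
 */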
 201/* The of_device layer has translated all of the assigned-address properties
 202 * into physical address resources, we only have to figure out the register
 203 * mapping.
 204 */
 205static void pci_parse_of_addrs(struct platform_device *op,
 206			       struct device_node *node,
 207			       struct pci_dev *dev)
 208{
 209	struct resource *op_res;
 210	const u32 *addrs;
 211	int proplen;
 212
 213	addrs = of_get_property(node, "assigned-addresses", &proplen);
 214	if (!addrs)
 215		return;
 216	if (ofpci_verbose)
 217		printk("    parse addresses (%d bytes) @ %p\n",
 218		       proplen, addrs);
 219	op_res = &op->resource[0];
 220	for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
 221		struct resource *res;
 222		unsigned long flags;
 223		int i;
 224
 225		flags = pci_parse_of_flags(addrs[0]);
 226		if (!flags)
 227			continue;
 228		i = addrs[0] & 0xff;
 229		if (ofpci_verbose)
 230			printk("  start: %llx, end: %llx, i: %x\n",
 231			       op_res->start, op_res->end, i);
 232
 233		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
 234			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 235		} else if (i == dev->rom_base_reg) {
 236			res = &dev->resource[PCI_ROM_RESOURCE];
 237			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 238		} else {
 239			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
 240			continue;
 241		}
 242		res->start = op_res->start;
 243		res->end = op_res->end;
 244		res->flags = flags;
 245		res->name = pci_name(dev);
 246	}
 247}
 248
 249static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
 250				  void *stc, void *host_controller,
 251				  struct platform_device  *op,
 252				  int numa_node)
 253{
 254	sd->iommu = iommu;
 255	sd->stc = stc;
 256	sd->host_controller = host_controller;
 257	sd->op = op;
 258	sd->numa_node = numa_node;
 259}
 260
 261static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 262					 struct device_node *node,
 263					 struct pci_bus *bus, int devfn)
 264{
 265	struct dev_archdata *sd;
 266	struct platform_device *op;
 267	struct pci_dev *dev;
 268	const char *type;
 269	u32 class;
 270
 271	dev = pci_alloc_dev(bus);
 272	if (!dev)
 273		return NULL;
 274
 275	op = of_find_device_by_node(node);
 276	sd = &dev->dev.archdata;
 277	pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
 278			      pbm->numa_node);
 279	sd = &op->dev.archdata;
 280	sd->iommu = pbm->iommu;
 281	sd->stc = &pbm->stc;
 282	sd->numa_node = pbm->numa_node;
 283
 284	if (!strcmp(node->name, "ebus"))
 285		of_propagate_archdata(op);
 286
 287	type = of_get_property(node, "device_type", NULL);
 288	if (type == NULL)
 289		type = "";
 290
 291	if (ofpci_verbose)
 292		printk("    create device, devfn: %x, type: %s\n",
 293		       devfn, type);
 294
 295	dev->sysdata = node;
 296	dev->dev.parent = bus->bridge;
 297	dev->dev.bus = &pci_bus_type;
 298	dev->dev.of_node = of_node_get(node);
 299	dev->devfn = devfn;
 300	dev->multifunction = 0;		/* maybe a lie? */
 301	set_pcie_port_type(dev);
 302
 303	pci_dev_assign_slot(dev);
 304	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
 305	dev->device = of_getintprop_default(node, "device-id", 0xffff);
 306	dev->subsystem_vendor =
 307		of_getintprop_default(node, "subsystem-vendor-id", 0);
 308	dev->subsystem_device =
 309		of_getintprop_default(node, "subsystem-id", 0);
 310
 311	dev->cfg_size = pci_cfg_space_size(dev);
 312
 313	/* We can't actually use the firmware value, we have
 314	 * to read what is in the register right now.  One
 315	 * reason is that in the case of IDE interfaces the
  316	 * firmware can sample the value before the IDE
 317	 * interface is programmed into native mode.
 318	 */
 319	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
 320	dev->class = class >> 8;
 321	dev->revision = class & 0xff;
 322
 323	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
 324		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
 325
 326	if (ofpci_verbose)
 327		printk("    class: 0x%x device name: %s\n",
 328		       dev->class, pci_name(dev));
 329
 330	/* I have seen IDE devices which will not respond to
 331	 * the bmdma simplex check reads if bus mastering is
 332	 * disabled.
 333	 */
 334	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
 335		pci_set_master(dev);
 336
 337	dev->current_state = PCI_UNKNOWN;	/* unknown power state */
 338	dev->error_state = pci_channel_io_normal;
 339	dev->dma_mask = 0xffffffff;
 340
 341	if (!strcmp(node->name, "pci")) {
 342		/* a PCI-PCI bridge */
 343		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
 344		dev->rom_base_reg = PCI_ROM_ADDRESS1;
 345	} else if (!strcmp(type, "cardbus")) {
 346		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
 347	} else {
 348		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
 349		dev->rom_base_reg = PCI_ROM_ADDRESS;
 350
 351		dev->irq = sd->op->archdata.irqs[0];
 352		if (dev->irq == 0xffffffff)
 353			dev->irq = PCI_IRQ_NONE;
 354	}
 355
 356	pci_parse_of_addrs(sd->op, node, dev);
 357
 358	if (ofpci_verbose)
 359		printk("    adding to system ...\n");
 360
 361	pci_device_add(dev, bus);
 362
 363	return dev;
 364}
 365
 366static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
 367{
 368	u32 idx, first, last;
 369
 370	first = 8;
 371	last = 0;
 372	for (idx = 0; idx < 8; idx++) {
 373		if ((map & (1 << idx)) != 0) {
 374			if (first > idx)
 375				first = idx;
 376			if (last < idx)
 377				last = idx;
 378		}
 379	}
 380
 381	*first_p = first;
 382	*last_p = last;
 383}
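/* Editor's example (illustrative, not part of the kernel source): for an
 * APB I/O map byte of 0x34 (bits 2, 4 and 5 set), apb_calc_first_last()
 * yields first == 2 and last == 5.  apb_fake_ranges() below then turns
 * that into a 2 MB-granular I/O window:
 *
 *	u32 first, last;
 *	apb_calc_first_last(0x34, &first, &last);
 *	// first == 2, last == 5
 *	// I/O window: [2 << 21, (5 << 21) + (1 << 21) - 1] = [0x400000, 0xbfffff]
 *
 * The memory map uses the same scheme with 512 MB (1 << 29) granularity.
 */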
 384
 385/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
 386 * a proper 'ranges' property.
 387 */
 388static void apb_fake_ranges(struct pci_dev *dev,
 389			    struct pci_bus *bus,
 390			    struct pci_pbm_info *pbm)
 391{
 392	struct pci_bus_region region;
 393	struct resource *res;
 394	u32 first, last;
 395	u8 map;
 396
 397	pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
 398	apb_calc_first_last(map, &first, &last);
 399	res = bus->resource[0];
 400	res->flags = IORESOURCE_IO;
 401	region.start = (first << 21);
 402	region.end = (last << 21) + ((1 << 21) - 1);
 403	pcibios_bus_to_resource(dev->bus, res, &region);
 404
 405	pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
 406	apb_calc_first_last(map, &first, &last);
 407	res = bus->resource[1];
 408	res->flags = IORESOURCE_MEM;
 409	region.start = (first << 29);
 410	region.end = (last << 29) + ((1 << 29) - 1);
 411	pcibios_bus_to_resource(dev->bus, res, &region);
 412}
 413
 414static void pci_of_scan_bus(struct pci_pbm_info *pbm,
 415			    struct device_node *node,
 416			    struct pci_bus *bus);
 417
 418#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
 419
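/* Editor's example (illustrative, not part of the kernel source):
 * GET_64BIT() folds two consecutive 32-bit OF cells into one 64-bit
 * value, high cell first.  With ranges[6] == 0x00000001 and
 * ranges[7] == 0x80000000:
 *
 *	u64 size = GET_64BIT(ranges, 6);	// 0x0000000180000000 == 6 GB
 */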
 420static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
 421			       struct device_node *node,
 422			       struct pci_dev *dev)
 423{
 424	struct pci_bus *bus;
 425	const u32 *busrange, *ranges;
 426	int len, i, simba;
 427	struct pci_bus_region region;
 428	struct resource *res;
 429	unsigned int flags;
 430	u64 size;
 431
 432	if (ofpci_verbose)
 433		printk("of_scan_pci_bridge(%s)\n", node->full_name);
 434
 435	/* parse bus-range property */
 436	busrange = of_get_property(node, "bus-range", &len);
 437	if (busrange == NULL || len != 8) {
 438		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
 439		       node->full_name);
 440		return;
 441	}
 442
 443	if (ofpci_verbose)
 444		printk("    Bridge bus range [%u --> %u]\n",
 445		       busrange[0], busrange[1]);
 446
 447	ranges = of_get_property(node, "ranges", &len);
 448	simba = 0;
 449	if (ranges == NULL) {
 450		const char *model = of_get_property(node, "model", NULL);
 451		if (model && !strcmp(model, "SUNW,simba"))
 452			simba = 1;
 453	}
 454
 455	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
 456	if (!bus) {
 457		printk(KERN_ERR "Failed to create pci bus for %s\n",
 458		       node->full_name);
 459		return;
 460	}
 461
 462	bus->primary = dev->bus->number;
 463	pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
 464	bus->bridge_ctl = 0;
 465
 466	if (ofpci_verbose)
 467		printk("    Bridge ranges[%p] simba[%d]\n",
 468		       ranges, simba);
 469
 470	/* parse ranges property, or cook one up by hand for Simba */
 471	/* PCI #address-cells == 3 and #size-cells == 2 always */
 472	res = &dev->resource[PCI_BRIDGE_RESOURCES];
 473	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
 474		res->flags = 0;
 475		bus->resource[i] = res;
 476		++res;
 477	}
 478	if (simba) {
 479		apb_fake_ranges(dev, bus, pbm);
 480		goto after_ranges;
 481	} else if (ranges == NULL) {
 482		pci_read_bridge_bases(bus);
 483		goto after_ranges;
 484	}
 485	i = 1;
 486	for (; len >= 32; len -= 32, ranges += 8) {
 487		u64 start;
 488
 489		if (ofpci_verbose)
 490			printk("    RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
 491			       "%08x:%08x]\n",
 492			       ranges[0], ranges[1], ranges[2], ranges[3],
 493			       ranges[4], ranges[5], ranges[6], ranges[7]);
 494
 495		flags = pci_parse_of_flags(ranges[0]);
 496		size = GET_64BIT(ranges, 6);
 497		if (flags == 0 || size == 0)
 498			continue;
 499
 500		/* On PCI-Express systems, PCI bridges that have no devices downstream
 501		 * have a bogus size value where the first 32-bit cell is 0xffffffff.
 502		 * This results in a bogus range where start + size overflows.
 503		 *
 504		 * Just skip these otherwise the kernel will complain when the resource
 505		 * tries to be claimed.
 506		 */
 507		if (size >> 32 == 0xffffffff)
 508			continue;
 509
 510		if (flags & IORESOURCE_IO) {
 511			res = bus->resource[0];
 512			if (res->flags) {
 513				printk(KERN_ERR "PCI: ignoring extra I/O range"
 514				       " for bridge %s\n", node->full_name);
 515				continue;
 516			}
 517		} else {
 518			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
 519				printk(KERN_ERR "PCI: too many memory ranges"
 520				       " for bridge %s\n", node->full_name);
 521				continue;
 522			}
 523			res = bus->resource[i];
 524			++i;
 525		}
 526
 527		res->flags = flags;
 528		region.start = start = GET_64BIT(ranges, 1);
 529		region.end = region.start + size - 1;
 530
 531		if (ofpci_verbose)
 532			printk("      Using flags[%08x] start[%016llx] size[%016llx]\n",
 533			       flags, start, size);
 534
 535		pcibios_bus_to_resource(dev->bus, res, &region);
 536	}
 537after_ranges:
 538	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
 539		bus->number);
 540	if (ofpci_verbose)
 541		printk("    bus name: %s\n", bus->name);
 542
 543	pci_of_scan_bus(pbm, node, bus);
 544}
 545
 546static void pci_of_scan_bus(struct pci_pbm_info *pbm,
 547			    struct device_node *node,
 548			    struct pci_bus *bus)
 549{
 550	struct device_node *child;
 551	const u32 *reg;
 552	int reglen, devfn, prev_devfn;
 553	struct pci_dev *dev;
 554
 555	if (ofpci_verbose)
 556		printk("PCI: scan_bus[%s] bus no %d\n",
 557		       node->full_name, bus->number);
 558
 559	child = NULL;
 560	prev_devfn = -1;
 561	while ((child = of_get_next_child(node, child)) != NULL) {
 562		if (ofpci_verbose)
 563			printk("  * %s\n", child->full_name);
 564		reg = of_get_property(child, "reg", &reglen);
 565		if (reg == NULL || reglen < 20)
 566			continue;
 567
 568		devfn = (reg[0] >> 8) & 0xff;
 569
 570		/* This is a workaround for some device trees
 571		 * which list PCI devices twice.  On the V100
 572		 * for example, device number 3 is listed twice.
 573		 * Once as "pm" and once again as "lomp".
 574		 */
 575		if (devfn == prev_devfn)
 576			continue;
 577		prev_devfn = devfn;
 578
 579		/* create a new pci_dev for this device */
 580		dev = of_create_pci_dev(pbm, child, bus, devfn);
 581		if (!dev)
 582			continue;
 583		if (ofpci_verbose)
 584			printk("PCI: dev header type: %x\n",
 585			       dev->hdr_type);
 586
 587		if (pci_is_bridge(dev))
 588			of_scan_pci_bridge(pbm, child, dev);
 589	}
 590}
 591
 592static ssize_t
 593show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
 594{
 595	struct pci_dev *pdev;
 596	struct device_node *dp;
 597
 598	pdev = to_pci_dev(dev);
 599	dp = pdev->dev.of_node;
 600
 601	return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
 602}
 603
 604static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
 605
 606static void pci_bus_register_of_sysfs(struct pci_bus *bus)
 607{
 608	struct pci_dev *dev;
 609	struct pci_bus *child_bus;
 610	int err;
 611
 612	list_for_each_entry(dev, &bus->devices, bus_list) {
 613		/* we don't really care if we can create this file or
 614		 * not, but we need to assign the result of the call
 615		 * or the world will fall under alien invasion and
 616		 * everybody will be frozen on a spaceship ready to be
 617		 * eaten on alpha centauri by some green and jelly
 618		 * humanoid.
 619		 */
 620		err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
 621		(void) err;
 622	}
 623	list_for_each_entry(child_bus, &bus->children, node)
 624		pci_bus_register_of_sysfs(child_bus);
 625}
 626
 627static void pci_claim_bus_resources(struct pci_bus *bus)
 628{
 629	struct pci_bus *child_bus;
 630	struct pci_dev *dev;
 631
 632	list_for_each_entry(dev, &bus->devices, bus_list) {
 633		int i;
 634
 635		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 636			struct resource *r = &dev->resource[i];
 637
 638			if (r->parent || !r->start || !r->flags)
 639				continue;
 640
 641			if (ofpci_verbose)
 642				printk("PCI: Claiming %s: "
 643				       "Resource %d: %016llx..%016llx [%x]\n",
 644				       pci_name(dev), i,
 645				       (unsigned long long)r->start,
 646				       (unsigned long long)r->end,
 647				       (unsigned int)r->flags);
 648
 649			pci_claim_resource(dev, i);
 650		}
 651	}
 652
 653	list_for_each_entry(child_bus, &bus->children, node)
 654		pci_claim_bus_resources(child_bus);
 655}
 656
 657struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
 658				 struct device *parent)
 659{
 660	LIST_HEAD(resources);
 661	struct device_node *node = pbm->op->dev.of_node;
 662	struct pci_bus *bus;
 663
 664	printk("PCI: Scanning PBM %s\n", node->full_name);
 665
 666	pci_add_resource_offset(&resources, &pbm->io_space,
 667				pbm->io_offset);
 668	pci_add_resource_offset(&resources, &pbm->mem_space,
 669				pbm->mem_offset);
 670	if (pbm->mem64_space.flags)
 671		pci_add_resource_offset(&resources, &pbm->mem64_space,
 672					pbm->mem64_offset);
 673	pbm->busn.start = pbm->pci_first_busno;
 674	pbm->busn.end	= pbm->pci_last_busno;
 675	pbm->busn.flags	= IORESOURCE_BUS;
 676	pci_add_resource(&resources, &pbm->busn);
 677	bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
 678				  pbm, &resources);
 679	if (!bus) {
 680		printk(KERN_ERR "Failed to create bus for %s\n",
 681		       node->full_name);
 682		pci_free_resource_list(&resources);
 683		return NULL;
 684	}
 685
 686	pci_of_scan_bus(pbm, node, bus);
 687	pci_bus_register_of_sysfs(bus);
 688
 689	pci_claim_bus_resources(bus);
 690	pci_bus_add_devices(bus);
 691	return bus;
 692}
 693
 694int pcibios_enable_device(struct pci_dev *dev, int mask)
 695{
 696	u16 cmd, oldcmd;
 697	int i;
 698
 699	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 700	oldcmd = cmd;
 701
 702	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 703		struct resource *res = &dev->resource[i];
 704
 705		/* Only set up the requested stuff */
 706		if (!(mask & (1<<i)))
 707			continue;
 708
 709		if (res->flags & IORESOURCE_IO)
 710			cmd |= PCI_COMMAND_IO;
 711		if (res->flags & IORESOURCE_MEM)
 712			cmd |= PCI_COMMAND_MEMORY;
 713	}
 714
 715	if (cmd != oldcmd) {
 716		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
 717		       pci_name(dev), cmd);
 718                /* Enable the appropriate bits in the PCI command register.  */
 719		pci_write_config_word(dev, PCI_COMMAND, cmd);
 720	}
 721	return 0;
 722}
 723
 724/* Platform support for /proc/bus/pci/X/Y mmap()s. */
 725
 726/* If the user uses a host-bridge as the PCI device, he may use
 727 * this to perform a raw mmap() of the I/O or MEM space behind
 728 * that controller.
 729 *
 730 * This can be useful for execution of x86 PCI bios initialization code
 731 * on a PCI card, like the xfree86 int10 stuff does.
 732 */
 733static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
 734				      enum pci_mmap_state mmap_state)
 735{
 736	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 737	unsigned long space_size, user_offset, user_size;
 738
 739	if (mmap_state == pci_mmap_io) {
 740		space_size = resource_size(&pbm->io_space);
 741	} else {
 742		space_size = resource_size(&pbm->mem_space);
 743	}
 744
 745	/* Make sure the request is in range. */
 746	user_offset = vma->vm_pgoff << PAGE_SHIFT;
 747	user_size = vma->vm_end - vma->vm_start;
 748
 749	if (user_offset >= space_size ||
 750	    (user_offset + user_size) > space_size)
 751		return -EINVAL;
 752
 753	if (mmap_state == pci_mmap_io) {
 754		vma->vm_pgoff = (pbm->io_space.start +
 755				 user_offset) >> PAGE_SHIFT;
 756	} else {
 757		vma->vm_pgoff = (pbm->mem_space.start +
 758				 user_offset) >> PAGE_SHIFT;
 759	}
 760
 761	return 0;
 762}
 763
 764/* Adjust vm_pgoff of VMA such that it is the physical page offset
 765 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 766 *
 767 * Basically, the user finds the base address for his device which he wishes
 768 * to mmap.  They read the 32-bit value from the config space base register,
 769 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 770 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 771 *
 772 * Returns negative error code on failure, zero on success.
 773 */
 774static int __pci_mmap_make_offset(struct pci_dev *pdev,
 775				  struct vm_area_struct *vma,
 776				  enum pci_mmap_state mmap_state)
 777{
 778	unsigned long user_paddr, user_size;
 779	int i, err;
 780
 781	/* First compute the physical address in vma->vm_pgoff,
 782	 * making sure the user offset is within range in the
 783	 * appropriate PCI space.
 784	 */
 785	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
 786	if (err)
 787		return err;
 788
 789	/* If this is a mapping on a host bridge, any address
 790	 * is OK.
 791	 */
 792	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
 793		return err;
 794
 795	/* Otherwise make sure it's in the range for one of the
 796	 * device's resources.
 797	 */
 798	user_paddr = vma->vm_pgoff << PAGE_SHIFT;
 799	user_size = vma->vm_end - vma->vm_start;
 800
 801	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
 802		struct resource *rp = &pdev->resource[i];
 803		resource_size_t aligned_end;
 804
 805		/* Active? */
 806		if (!rp->flags)
 807			continue;
 808
 809		/* Same type? */
 810		if (i == PCI_ROM_RESOURCE) {
 811			if (mmap_state != pci_mmap_mem)
 812				continue;
 813		} else {
 814			if ((mmap_state == pci_mmap_io &&
 815			     (rp->flags & IORESOURCE_IO) == 0) ||
 816			    (mmap_state == pci_mmap_mem &&
 817			     (rp->flags & IORESOURCE_MEM) == 0))
 818				continue;
 819		}
 820
 821		/* Align the resource end to the next page address.
 822		 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
 823		 * because actually we need the address of the next byte
 824		 * after rp->end.
 825		 */
 826		aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;
 827
 828		if ((rp->start <= user_paddr) &&
 829		    (user_paddr + user_size) <= aligned_end)
 830			break;
 831	}
 832
 833	if (i > PCI_ROM_RESOURCE)
 834		return -EINVAL;
 835
 836	return 0;
 837}
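/* Editor's sketch (user space, illustrative only, not part of this file):
 * how the mapping described in the comments above is typically driven
 * from user space.  The procfs path and BAR value are hypothetical; the
 * PCIIOC_MMAP_IS_MEM ioctl selects MEM-space semantics for the mmap().
 * Error handling and close(fd) are omitted for brevity.
 */
#include <fcntl.h>
#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/pci.h>		/* PCIIOC_MMAP_IS_MEM */

static void *map_pci_mem(const char *procpath, off_t bar_bus_addr, size_t len)
{
	/* e.g. procpath = "/proc/bus/pci/00/01.0" (hypothetical device) */
	int fd = open(procpath, O_RDWR);

	if (fd < 0)
		return MAP_FAILED;

	/* Ask for a MEM-space mapping rather than I/O space. */
	ioctl(fd, PCIIOC_MMAP_IS_MEM);

	/* Offset is the page-aligned 32-bit bus address read from the BAR,
	 * plus whatever PAGE_SIZE multiple the caller wants.
	 */
	return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, bar_bus_addr);
}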
 838
 839/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 840 * device mapping.
 841 */
 842static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
 843					     enum pci_mmap_state mmap_state)
 844{
 845	/* Our io_remap_pfn_range takes care of this, do nothing.  */
 846}
 847
 848/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
 849 * for this architecture.  The region in the process to map is described by vm_start
 850 * and vm_end members of VMA, the base physical address is found in vm_pgoff.
 851 * The pci device structure is provided so that architectures may make mapping
 852 * decisions on a per-device or per-bus basis.
 853 *
 854 * Returns a negative error code on failure, zero on success.
 855 */
 856int pci_mmap_page_range(struct pci_dev *dev, int bar,
 857			struct vm_area_struct *vma,
 858			enum pci_mmap_state mmap_state, int write_combine)
 859{
 860	int ret;
 861
 862	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
 863	if (ret < 0)
 864		return ret;
 865
 866	__pci_mmap_set_pgprot(dev, vma, mmap_state);
 867
 868	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 869	ret = io_remap_pfn_range(vma, vma->vm_start,
 870				 vma->vm_pgoff,
 871				 vma->vm_end - vma->vm_start,
 872				 vma->vm_page_prot);
 873	if (ret)
 874		return ret;
 875
 876	return 0;
 877}
 878
 879#ifdef CONFIG_NUMA
 880int pcibus_to_node(struct pci_bus *pbus)
 881{
 882	struct pci_pbm_info *pbm = pbus->sysdata;
 883
 884	return pbm->numa_node;
 885}
 886EXPORT_SYMBOL(pcibus_to_node);
 887#endif
 888
 889/* Return the domain number for this pci bus */
 890
 891int pci_domain_nr(struct pci_bus *pbus)
 892{
 893	struct pci_pbm_info *pbm = pbus->sysdata;
 894	int ret;
 895
 896	if (!pbm) {
 897		ret = -ENXIO;
 898	} else {
 899		ret = pbm->index;
 900	}
 901
 902	return ret;
 903}
 904EXPORT_SYMBOL(pci_domain_nr);
 905
 906#ifdef CONFIG_PCI_MSI
 907int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 908{
 909	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 910	unsigned int irq;
 911
 912	if (!pbm->setup_msi_irq)
 913		return -EINVAL;
 914
 915	return pbm->setup_msi_irq(&irq, pdev, desc);
 916}
 917
 918void arch_teardown_msi_irq(unsigned int irq)
 919{
 920	struct msi_desc *entry = irq_get_msi_desc(irq);
 921	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
 922	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 923
 924	if (pbm->teardown_msi_irq)
 925		pbm->teardown_msi_irq(irq, pdev);
 926}
 927#endif /* !(CONFIG_PCI_MSI) */
 928
 929static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
 930{
 931	struct pci_dev *ali_isa_bridge;
 932	u8 val;
 933
 934	/* ALI sound chips generate 31-bits of DMA, a special register
 935	 * determines what bit 31 is emitted as.
 936	 */
 937	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
 938					 PCI_DEVICE_ID_AL_M1533,
 939					 NULL);
 940
 941	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
 942	if (set_bit)
 943		val |= 0x01;
 944	else
 945		val &= ~0x01;
 946	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
 947	pci_dev_put(ali_isa_bridge);
 948}
 949
 950int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
 951{
 952	u64 dma_addr_mask;
 953
 954	if (pdev == NULL) {
 955		dma_addr_mask = 0xffffffff;
 956	} else {
 957		struct iommu *iommu = pdev->dev.archdata.iommu;
 958
 959		dma_addr_mask = iommu->dma_addr_mask;
 960
 961		if (pdev->vendor == PCI_VENDOR_ID_AL &&
 962		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
 963		    device_mask == 0x7fffffff) {
 964			ali_sound_dma_hack(pdev,
 965					   (dma_addr_mask & 0x80000000) != 0);
 966			return 1;
 967		}
 968	}
 969
 970	if (device_mask >= (1UL << 32UL))
 971		return 0;
 972
 973	return (device_mask & dma_addr_mask) == dma_addr_mask;
 974}
 975
 976void pci_resource_to_user(const struct pci_dev *pdev, int bar,
 977			  const struct resource *rp, resource_size_t *start,
 978			  resource_size_t *end)
 979{
 980	struct pci_bus_region region;
 981
 982	/*
 983	 * "User" addresses are shown in /sys/devices/pci.../.../resource
 984	 * and /proc/bus/pci/devices and used as mmap offsets for
 985	 * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
 986	 *
 987	 * On sparc, these are PCI bus addresses, i.e., raw BAR values.
 988	 */
 989	pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp);
 990	*start = region.start;
 991	*end = region.end;
 992}
 993
 994void pcibios_set_master(struct pci_dev *dev)
 995{
 996	/* No special bus mastering setup handling */
 997}
 998
 999#ifdef CONFIG_PCI_IOV
1000int pcibios_add_device(struct pci_dev *dev)
1001{
1002	struct pci_dev *pdev;
1003
1004	/* Add sriov arch specific initialization here.
1005	 * Copy dev_archdata from PF to VF
1006	 */
1007	if (dev->is_virtfn) {
1008		struct dev_archdata *psd;
1009
1010		pdev = dev->physfn;
1011		psd = &pdev->dev.archdata;
1012		pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
1013				      psd->stc, psd->host_controller, NULL,
1014				      psd->numa_node);
1015	}
1016	return 0;
1017}
1018#endif /* CONFIG_PCI_IOV */
1019
1020static int __init pcibios_init(void)
1021{
1022	pci_dfl_cache_line_size = 64 >> 2;
1023	return 0;
1024}
1025subsys_initcall(pcibios_init);
1026
1027#ifdef CONFIG_SYSFS
1028
1029#define SLOT_NAME_SIZE  11  /* Max decimal digits + null in u32 */
1030
1031static void pcie_bus_slot_names(struct pci_bus *pbus)
1032{
1033	struct pci_dev *pdev;
1034	struct pci_bus *bus;
1035
1036	list_for_each_entry(pdev, &pbus->devices, bus_list) {
1037		char name[SLOT_NAME_SIZE];
1038		struct pci_slot *pci_slot;
1039		const u32 *slot_num;
1040		int len;
1041
1042		slot_num = of_get_property(pdev->dev.of_node,
1043					   "physical-slot#", &len);
1044
1045		if (slot_num == NULL || len != 4)
1046			continue;
1047
1048		snprintf(name, sizeof(name), "%u", slot_num[0]);
1049		pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
1050
1051		if (IS_ERR(pci_slot))
1052			pr_err("PCI: pci_create_slot returned %ld.\n",
1053			       PTR_ERR(pci_slot));
1054	}
1055
1056	list_for_each_entry(bus, &pbus->children, node)
1057		pcie_bus_slot_names(bus);
1058}
1059
1060static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
1061{
1062	const struct pci_slot_names {
1063		u32	slot_mask;
1064		char	names[0];
1065	} *prop;
1066	const char *sp;
1067	int len, i;
1068	u32 mask;
1069
1070	prop = of_get_property(node, "slot-names", &len);
1071	if (!prop)
1072		return;
1073
1074	mask = prop->slot_mask;
1075	sp = prop->names;
1076
1077	if (ofpci_verbose)
1078		printk("PCI: Making slots for [%s] mask[0x%02x]\n",
1079		       node->full_name, mask);
1080
1081	i = 0;
1082	while (mask) {
1083		struct pci_slot *pci_slot;
1084		u32 this_bit = 1 << i;
1085
1086		if (!(mask & this_bit)) {
1087			i++;
1088			continue;
1089		}
1090
1091		if (ofpci_verbose)
1092			printk("PCI: Making slot [%s]\n", sp);
1093
1094		pci_slot = pci_create_slot(bus, i, sp, NULL);
1095		if (IS_ERR(pci_slot))
1096			printk(KERN_ERR "PCI: pci_create_slot returned %ld\n",
1097			       PTR_ERR(pci_slot));
1098
1099		sp += strlen(sp) + 1;
1100		mask &= ~this_bit;
1101		i++;
1102	}
1103}
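/* Editor's example (illustrative, not part of the kernel source): the
 * OBP "slot-names" property is a 32-bit slot mask followed by one
 * NUL-terminated name per set bit, lowest bit first.  A property with
 * mask 0x05 and the (hypothetical) names "PCI1" and "PCI3" is walked
 * by the loop above as:
 *
 *	// mask == 0x05: bits 0 and 2 set
 *	pci_create_slot(bus, 0, "PCI1", NULL);
 *	pci_create_slot(bus, 2, "PCI3", NULL);
 *
 * i.e. one slot object per set bit, consuming the names in order.
 */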
1104
1105static int __init of_pci_slot_init(void)
1106{
1107	struct pci_bus *pbus = NULL;
1108
1109	while ((pbus = pci_find_next_bus(pbus)) != NULL) {
1110		struct device_node *node;
1111		struct pci_dev *pdev;
1112
1113		pdev = list_first_entry(&pbus->devices, struct pci_dev,
1114					bus_list);
1115
1116		if (pdev && pci_is_pcie(pdev)) {
1117			pcie_bus_slot_names(pbus);
1118		} else {
1119
1120			if (pbus->self) {
1121
1122				/* PCI->PCI bridge */
1123				node = pbus->self->dev.of_node;
1124
1125			} else {
1126				struct pci_pbm_info *pbm = pbus->sysdata;
1127
1128				/* Host PCI controller */
1129				node = pbm->op->dev.of_node;
1130			}
1131
1132			pci_bus_slot_names(node, pbus);
1133		}
1134	}
1135
1136	return 0;
1137}
1138device_initcall(of_pci_slot_init);
1139#endif
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/* pci.c: UltraSparc PCI controller support.
   3 *
   4 * Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
   5 * Copyright (C) 1998, 1999 Eddie C. Dost   (ecd@skynet.be)
   6 * Copyright (C) 1999 Jakub Jelinek   (jj@ultra.linux.cz)
   7 *
   8 * OF tree based PCI bus probing taken from the PowerPC port
   9 * with minor modifications, see there for credits.
  10 */
  11
  12#include <linux/export.h>
  13#include <linux/kernel.h>
  14#include <linux/string.h>
  15#include <linux/sched.h>
  16#include <linux/capability.h>
  17#include <linux/errno.h>
  18#include <linux/pci.h>
  19#include <linux/msi.h>
  20#include <linux/irq.h>
  21#include <linux/init.h>
  22#include <linux/of.h>
  23#include <linux/of_device.h>
  24
  25#include <linux/uaccess.h>
  26#include <asm/pgtable.h>
  27#include <asm/irq.h>
  28#include <asm/prom.h>
  29#include <asm/apb.h>
  30
  31#include "pci_impl.h"
  32#include "kernel.h"
  33
  34/* List of all PCI controllers found in the system. */
  35struct pci_pbm_info *pci_pbm_root = NULL;
  36
  37/* Each PBM found gets a unique index. */
  38int pci_num_pbms = 0;
  39
  40volatile int pci_poke_in_progress;
  41volatile int pci_poke_cpu = -1;
  42volatile int pci_poke_faulted;
  43
  44static DEFINE_SPINLOCK(pci_poke_lock);
  45
  46void pci_config_read8(u8 *addr, u8 *ret)
  47{
  48	unsigned long flags;
  49	u8 byte;
  50
  51	spin_lock_irqsave(&pci_poke_lock, flags);
  52	pci_poke_cpu = smp_processor_id();
  53	pci_poke_in_progress = 1;
  54	pci_poke_faulted = 0;
  55	__asm__ __volatile__("membar #Sync\n\t"
  56			     "lduba [%1] %2, %0\n\t"
  57			     "membar #Sync"
  58			     : "=r" (byte)
  59			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
  60			     : "memory");
  61	pci_poke_in_progress = 0;
  62	pci_poke_cpu = -1;
  63	if (!pci_poke_faulted)
  64		*ret = byte;
  65	spin_unlock_irqrestore(&pci_poke_lock, flags);
  66}
  67
  68void pci_config_read16(u16 *addr, u16 *ret)
  69{
  70	unsigned long flags;
  71	u16 word;
  72
  73	spin_lock_irqsave(&pci_poke_lock, flags);
  74	pci_poke_cpu = smp_processor_id();
  75	pci_poke_in_progress = 1;
  76	pci_poke_faulted = 0;
  77	__asm__ __volatile__("membar #Sync\n\t"
  78			     "lduha [%1] %2, %0\n\t"
  79			     "membar #Sync"
  80			     : "=r" (word)
  81			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
  82			     : "memory");
  83	pci_poke_in_progress = 0;
  84	pci_poke_cpu = -1;
  85	if (!pci_poke_faulted)
  86		*ret = word;
  87	spin_unlock_irqrestore(&pci_poke_lock, flags);
  88}
  89
  90void pci_config_read32(u32 *addr, u32 *ret)
  91{
  92	unsigned long flags;
  93	u32 dword;
  94
  95	spin_lock_irqsave(&pci_poke_lock, flags);
  96	pci_poke_cpu = smp_processor_id();
  97	pci_poke_in_progress = 1;
  98	pci_poke_faulted = 0;
  99	__asm__ __volatile__("membar #Sync\n\t"
 100			     "lduwa [%1] %2, %0\n\t"
 101			     "membar #Sync"
 102			     : "=r" (dword)
 103			     : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 104			     : "memory");
 105	pci_poke_in_progress = 0;
 106	pci_poke_cpu = -1;
 107	if (!pci_poke_faulted)
 108		*ret = dword;
 109	spin_unlock_irqrestore(&pci_poke_lock, flags);
 110}
 111
 112void pci_config_write8(u8 *addr, u8 val)
 113{
 114	unsigned long flags;
 115
 116	spin_lock_irqsave(&pci_poke_lock, flags);
 117	pci_poke_cpu = smp_processor_id();
 118	pci_poke_in_progress = 1;
 119	pci_poke_faulted = 0;
 120	__asm__ __volatile__("membar #Sync\n\t"
 121			     "stba %0, [%1] %2\n\t"
 122			     "membar #Sync"
 123			     : /* no outputs */
 124			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 125			     : "memory");
 126	pci_poke_in_progress = 0;
 127	pci_poke_cpu = -1;
 128	spin_unlock_irqrestore(&pci_poke_lock, flags);
 129}
 130
 131void pci_config_write16(u16 *addr, u16 val)
 132{
 133	unsigned long flags;
 134
 135	spin_lock_irqsave(&pci_poke_lock, flags);
 136	pci_poke_cpu = smp_processor_id();
 137	pci_poke_in_progress = 1;
 138	pci_poke_faulted = 0;
 139	__asm__ __volatile__("membar #Sync\n\t"
 140			     "stha %0, [%1] %2\n\t"
 141			     "membar #Sync"
 142			     : /* no outputs */
 143			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 144			     : "memory");
 145	pci_poke_in_progress = 0;
 146	pci_poke_cpu = -1;
 147	spin_unlock_irqrestore(&pci_poke_lock, flags);
 148}
 149
 150void pci_config_write32(u32 *addr, u32 val)
 151{
 152	unsigned long flags;
 153
 154	spin_lock_irqsave(&pci_poke_lock, flags);
 155	pci_poke_cpu = smp_processor_id();
 156	pci_poke_in_progress = 1;
 157	pci_poke_faulted = 0;
 158	__asm__ __volatile__("membar #Sync\n\t"
 159			     "stwa %0, [%1] %2\n\t"
 160			     "membar #Sync"
 161			     : /* no outputs */
 162			     : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
 163			     : "memory");
 164	pci_poke_in_progress = 0;
 165	pci_poke_cpu = -1;
 166	spin_unlock_irqrestore(&pci_poke_lock, flags);
 167}
 168
 169static int ofpci_verbose;
 170
 171static int __init ofpci_debug(char *str)
 172{
 173	int val = 0;
 174
 175	get_option(&str, &val);
 176	if (val)
 177		ofpci_verbose = 1;
 178	return 1;
 179}
 180
 181__setup("ofpci_debug=", ofpci_debug);
 182
 183static unsigned long pci_parse_of_flags(u32 addr0)
 184{
 185	unsigned long flags = 0;
 186
 187	if (addr0 & 0x02000000) {
 188		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
 189		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
 190		if (addr0 & 0x01000000)
 191			flags |= IORESOURCE_MEM_64
 192				 | PCI_BASE_ADDRESS_MEM_TYPE_64;
 193		if (addr0 & 0x40000000)
 194			flags |= IORESOURCE_PREFETCH
 195				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
 196	} else if (addr0 & 0x01000000)
 197		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
 198	return flags;
 199}
 200
 201/* The of_device layer has translated all of the assigned-address properties
 202 * into physical address resources, we only have to figure out the register
 203 * mapping.
 204 */
 205static void pci_parse_of_addrs(struct platform_device *op,
 206			       struct device_node *node,
 207			       struct pci_dev *dev)
 208{
 209	struct resource *op_res;
 210	const u32 *addrs;
 211	int proplen;
 212
 213	addrs = of_get_property(node, "assigned-addresses", &proplen);
 214	if (!addrs)
 215		return;
 216	if (ofpci_verbose)
 217		pci_info(dev, "    parse addresses (%d bytes) @ %p\n",
 218			 proplen, addrs);
 219	op_res = &op->resource[0];
 220	for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
 221		struct resource *res;
 222		unsigned long flags;
 223		int i;
 224
 225		flags = pci_parse_of_flags(addrs[0]);
 226		if (!flags)
 227			continue;
 228		i = addrs[0] & 0xff;
 229		if (ofpci_verbose)
 230			pci_info(dev, "  start: %llx, end: %llx, i: %x\n",
 231				 op_res->start, op_res->end, i);
 232
 233		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
 234			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
 235		} else if (i == dev->rom_base_reg) {
 236			res = &dev->resource[PCI_ROM_RESOURCE];
 237			flags |= IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
 238		} else {
 239			pci_err(dev, "bad cfg reg num 0x%x\n", i);
 240			continue;
 241		}
 242		res->start = op_res->start;
 243		res->end = op_res->end;
 244		res->flags = flags;
 245		res->name = pci_name(dev);
 246
 247		pci_info(dev, "reg 0x%x: %pR\n", i, res);
 248	}
 249}
 250
 251static void pci_init_dev_archdata(struct dev_archdata *sd, void *iommu,
 252				  void *stc, void *host_controller,
 253				  struct platform_device  *op,
 254				  int numa_node)
 255{
 256	sd->iommu = iommu;
 257	sd->stc = stc;
 258	sd->host_controller = host_controller;
 259	sd->op = op;
 260	sd->numa_node = numa_node;
 261}
 262
 263static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
 264					 struct device_node *node,
 265					 struct pci_bus *bus, int devfn)
 266{
 267	struct dev_archdata *sd;
 268	struct platform_device *op;
 269	struct pci_dev *dev;
 270	u32 class;
 271
 272	dev = pci_alloc_dev(bus);
 273	if (!dev)
 274		return NULL;
 275
 276	op = of_find_device_by_node(node);
 277	sd = &dev->dev.archdata;
 278	pci_init_dev_archdata(sd, pbm->iommu, &pbm->stc, pbm, op,
 279			      pbm->numa_node);
 280	sd = &op->dev.archdata;
 281	sd->iommu = pbm->iommu;
 282	sd->stc = &pbm->stc;
 283	sd->numa_node = pbm->numa_node;
 284
 285	if (of_node_name_eq(node, "ebus"))
 286		of_propagate_archdata(op);
 287
 288	if (ofpci_verbose)
 289		pci_info(bus,"    create device, devfn: %x, type: %s\n",
 290			 devfn, of_node_get_device_type(node));
 291
 292	dev->sysdata = node;
 293	dev->dev.parent = bus->bridge;
 294	dev->dev.bus = &pci_bus_type;
 295	dev->dev.of_node = of_node_get(node);
 296	dev->devfn = devfn;
 297	dev->multifunction = 0;		/* maybe a lie? */
 298	set_pcie_port_type(dev);
 299
 300	pci_dev_assign_slot(dev);
 301	dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
 302	dev->device = of_getintprop_default(node, "device-id", 0xffff);
 303	dev->subsystem_vendor =
 304		of_getintprop_default(node, "subsystem-vendor-id", 0);
 305	dev->subsystem_device =
 306		of_getintprop_default(node, "subsystem-id", 0);
 307
 308	dev->cfg_size = pci_cfg_space_size(dev);
 309
 310	/* We can't actually use the firmware value, we have
 311	 * to read what is in the register right now.  One
 312	 * reason is that in the case of IDE interfaces the
  313	 * firmware can sample the value before the IDE
 314	 * interface is programmed into native mode.
 315	 */
 316	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
 317	dev->class = class >> 8;
 318	dev->revision = class & 0xff;
 319
 320	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
 321		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
 322
 323	/* I have seen IDE devices which will not respond to
 324	 * the bmdma simplex check reads if bus mastering is
 325	 * disabled.
 326	 */
 327	if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
 328		pci_set_master(dev);
 329
 330	dev->current_state = PCI_UNKNOWN;	/* unknown power state */
 331	dev->error_state = pci_channel_io_normal;
 332	dev->dma_mask = 0xffffffff;
 333
 334	if (of_node_name_eq(node, "pci")) {
 335		/* a PCI-PCI bridge */
 336		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
 337		dev->rom_base_reg = PCI_ROM_ADDRESS1;
 338	} else if (of_node_is_type(node, "cardbus")) {
 339		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
 340	} else {
 341		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
 342		dev->rom_base_reg = PCI_ROM_ADDRESS;
 343
 344		dev->irq = sd->op->archdata.irqs[0];
 345		if (dev->irq == 0xffffffff)
 346			dev->irq = PCI_IRQ_NONE;
 347	}
 348
 349	pci_info(dev, "[%04x:%04x] type %02x class %#08x\n",
 350		 dev->vendor, dev->device, dev->hdr_type, dev->class);
 351
 352	pci_parse_of_addrs(sd->op, node, dev);
 353
 354	if (ofpci_verbose)
 355		pci_info(dev, "    adding to system ...\n");
 356
 357	pci_device_add(dev, bus);
 358
 359	return dev;
 360}
 361
 362static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
 363{
 364	u32 idx, first, last;
 365
 366	first = 8;
 367	last = 0;
 368	for (idx = 0; idx < 8; idx++) {
 369		if ((map & (1 << idx)) != 0) {
 370			if (first > idx)
 371				first = idx;
 372			if (last < idx)
 373				last = idx;
 374		}
 375	}
 376
 377	*first_p = first;
 378	*last_p = last;
 379}
 380
 381/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
 382 * a proper 'ranges' property.
 383 */
 384static void apb_fake_ranges(struct pci_dev *dev,
 385			    struct pci_bus *bus,
 386			    struct pci_pbm_info *pbm)
 387{
 388	struct pci_bus_region region;
 389	struct resource *res;
 390	u32 first, last;
 391	u8 map;
 392
 393	pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
 394	apb_calc_first_last(map, &first, &last);
 395	res = bus->resource[0];
 396	res->flags = IORESOURCE_IO;
 397	region.start = (first << 21);
 398	region.end = (last << 21) + ((1 << 21) - 1);
 399	pcibios_bus_to_resource(dev->bus, res, &region);
 400
 401	pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
 402	apb_calc_first_last(map, &first, &last);
 403	res = bus->resource[1];
 404	res->flags = IORESOURCE_MEM;
 405	region.start = (first << 29);
 406	region.end = (last << 29) + ((1 << 29) - 1);
 407	pcibios_bus_to_resource(dev->bus, res, &region);
 408}
 409
 410static void pci_of_scan_bus(struct pci_pbm_info *pbm,
 411			    struct device_node *node,
 412			    struct pci_bus *bus);
 413
 414#define GET_64BIT(prop, i)	((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
 415
 416static void of_scan_pci_bridge(struct pci_pbm_info *pbm,
 417			       struct device_node *node,
 418			       struct pci_dev *dev)
 419{
 420	struct pci_bus *bus;
 421	const u32 *busrange, *ranges;
 422	int len, i, simba;
 423	struct pci_bus_region region;
 424	struct resource *res;
 425	unsigned int flags;
 426	u64 size;
 427
 428	if (ofpci_verbose)
 429		pci_info(dev, "of_scan_pci_bridge(%pOF)\n", node);
 430
 431	/* parse bus-range property */
 432	busrange = of_get_property(node, "bus-range", &len);
 433	if (busrange == NULL || len != 8) {
 434		pci_info(dev, "Can't get bus-range for PCI-PCI bridge %pOF\n",
 435		       node);
 436		return;
 437	}
 438
 439	if (ofpci_verbose)
 440		pci_info(dev, "    Bridge bus range [%u --> %u]\n",
 441			 busrange[0], busrange[1]);
 442
 443	ranges = of_get_property(node, "ranges", &len);
 444	simba = 0;
 445	if (ranges == NULL) {
 446		const char *model = of_get_property(node, "model", NULL);
 447		if (model && !strcmp(model, "SUNW,simba"))
 448			simba = 1;
 449	}
 450
 451	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
 452	if (!bus) {
 453		pci_err(dev, "Failed to create pci bus for %pOF\n",
 454			node);
 455		return;
 456	}
 457
 458	bus->primary = dev->bus->number;
 459	pci_bus_insert_busn_res(bus, busrange[0], busrange[1]);
 460	bus->bridge_ctl = 0;
 461
 462	if (ofpci_verbose)
 463		pci_info(dev, "    Bridge ranges[%p] simba[%d]\n",
 464			 ranges, simba);
 465
 466	/* parse ranges property, or cook one up by hand for Simba */
 467	/* PCI #address-cells == 3 and #size-cells == 2 always */
 468	res = &dev->resource[PCI_BRIDGE_RESOURCES];
 469	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
 470		res->flags = 0;
 471		bus->resource[i] = res;
 472		++res;
 473	}
 474	if (simba) {
 475		apb_fake_ranges(dev, bus, pbm);
 476		goto after_ranges;
 477	} else if (ranges == NULL) {
 478		pci_read_bridge_bases(bus);
 479		goto after_ranges;
 480	}
 481	i = 1;
 482	for (; len >= 32; len -= 32, ranges += 8) {
 483		u64 start;
 484
 485		if (ofpci_verbose)
 486			pci_info(dev, "    RAW Range[%08x:%08x:%08x:%08x:%08x:%08x:"
 487				 "%08x:%08x]\n",
 488				 ranges[0], ranges[1], ranges[2], ranges[3],
 489				 ranges[4], ranges[5], ranges[6], ranges[7]);
 490
 491		flags = pci_parse_of_flags(ranges[0]);
 492		size = GET_64BIT(ranges, 6);
 493		if (flags == 0 || size == 0)
 494			continue;
 495
 496		/* On PCI-Express systems, PCI bridges that have no devices downstream
 497		 * have a bogus size value where the first 32-bit cell is 0xffffffff.
 498		 * This results in a bogus range where start + size overflows.
 499		 *
 500		 * Just skip these otherwise the kernel will complain when the resource
 501		 * tries to be claimed.
 502		 */
 503		if (size >> 32 == 0xffffffff)
 504			continue;
 505
 506		if (flags & IORESOURCE_IO) {
 507			res = bus->resource[0];
 508			if (res->flags) {
 509				pci_err(dev, "ignoring extra I/O range"
 510					" for bridge %pOF\n", node);
 511				continue;
 512			}
 513		} else {
 514			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
 515				pci_err(dev, "too many memory ranges"
 516					" for bridge %pOF\n", node);
 517				continue;
 518			}
 519			res = bus->resource[i];
 520			++i;
 521		}
 522
 523		res->flags = flags;
 524		region.start = start = GET_64BIT(ranges, 1);
 525		region.end = region.start + size - 1;
 526
 527		if (ofpci_verbose)
 528			pci_info(dev, "      Using flags[%08x] start[%016llx] size[%016llx]\n",
 529				 flags, start, size);
 530
 531		pcibios_bus_to_resource(dev->bus, res, &region);
 532	}
 533after_ranges:
 534	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
 535		bus->number);
 536	if (ofpci_verbose)
 537		pci_info(dev, "    bus name: %s\n", bus->name);
 538
 539	pci_of_scan_bus(pbm, node, bus);
 540}
 541
 542static void pci_of_scan_bus(struct pci_pbm_info *pbm,
 543			    struct device_node *node,
 544			    struct pci_bus *bus)
 545{
 546	struct device_node *child;
 547	const u32 *reg;
 548	int reglen, devfn, prev_devfn;
 549	struct pci_dev *dev;
 550
 551	if (ofpci_verbose)
 552		pci_info(bus, "scan_bus[%pOF] bus no %d\n",
 553			 node, bus->number);
 554
 555	child = NULL;
 556	prev_devfn = -1;
 557	while ((child = of_get_next_child(node, child)) != NULL) {
 558		if (ofpci_verbose)
 559			pci_info(bus, "  * %pOF\n", child);
 560		reg = of_get_property(child, "reg", &reglen);
 561		if (reg == NULL || reglen < 20)
 562			continue;
 563
 564		devfn = (reg[0] >> 8) & 0xff;
 565
 566		/* This is a workaround for some device trees
 567		 * which list PCI devices twice.  On the V100
 568		 * for example, device number 3 is listed twice.
 569		 * Once as "pm" and once again as "lomp".
 570		 */
 571		if (devfn == prev_devfn)
 572			continue;
 573		prev_devfn = devfn;
 574
 575		/* create a new pci_dev for this device */
 576		dev = of_create_pci_dev(pbm, child, bus, devfn);
 577		if (!dev)
 578			continue;
 579		if (ofpci_verbose)
 580			pci_info(dev, "dev header type: %x\n", dev->hdr_type);
 581
 582		if (pci_is_bridge(dev))
 583			of_scan_pci_bridge(pbm, child, dev);
 584	}
 585}
 586
 587static ssize_t
 588show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
 589{
 590	struct pci_dev *pdev;
 591	struct device_node *dp;
 592
 593	pdev = to_pci_dev(dev);
 594	dp = pdev->dev.of_node;
 595
 596	return snprintf (buf, PAGE_SIZE, "%pOF\n", dp);
 597}
 598
 599static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
 600
 601static void pci_bus_register_of_sysfs(struct pci_bus *bus)
 602{
 603	struct pci_dev *dev;
 604	struct pci_bus *child_bus;
 605	int err;
 606
 607	list_for_each_entry(dev, &bus->devices, bus_list) {
 608		/* we don't really care if we can create this file or
 609		 * not, but we need to assign the result of the call
 610		 * or the world will fall under alien invasion and
 611		 * everybody will be frozen on a spaceship ready to be
 612		 * eaten on alpha centauri by some green and jelly
 613		 * humanoid.
 614		 */
 615		err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
 616		(void) err;
 617	}
 618	list_for_each_entry(child_bus, &bus->children, node)
 619		pci_bus_register_of_sysfs(child_bus);
 620}
 621
 622static void pci_claim_legacy_resources(struct pci_dev *dev)
 623{
 624	struct pci_bus_region region;
 625	struct resource *p, *root, *conflict;
 626
 627	if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
 628		return;
 629
 630	p = kzalloc(sizeof(*p), GFP_KERNEL);
 631	if (!p)
 632		return;
 633
 634	p->name = "Video RAM area";
 635	p->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
 636
 637	region.start = 0xa0000UL;
 638	region.end = region.start + 0x1ffffUL;
 639	pcibios_bus_to_resource(dev->bus, p, &region);
 640
 641	root = pci_find_parent_resource(dev, p);
 642	if (!root) {
 643		pci_info(dev, "can't claim VGA legacy %pR: no compatible bridge window\n", p);
 644		goto err;
 645	}
 646
 647	conflict = request_resource_conflict(root, p);
 648	if (conflict) {
 649		pci_info(dev, "can't claim VGA legacy %pR: address conflict with %s %pR\n",
 650			 p, conflict->name, conflict);
 651		goto err;
 652	}
 653
 654	pci_info(dev, "VGA legacy framebuffer %pR\n", p);
 655	return;
 656
 657err:
 658	kfree(p);
 659}
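/* Editor's note (descriptive, not in the original file): the region
 * claimed above is the classic 128 KB legacy VGA aperture in PCI bus
 * space, 0xa0000..0xbffff (0xa0000 + 0x1ffff == 0xbffff);
 * pcibios_bus_to_resource() translates it into the host's resource
 * space before it is requested under the matching bridge window.
 */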
 660
 661static void pci_claim_bus_resources(struct pci_bus *bus)
 662{
 663	struct pci_bus *child_bus;
 664	struct pci_dev *dev;
 665
 666	list_for_each_entry(dev, &bus->devices, bus_list) {
 667		int i;
 668
 669		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 670			struct resource *r = &dev->resource[i];
 671
 672			if (r->parent || !r->start || !r->flags)
 673				continue;
 674
 675			if (ofpci_verbose)
 676				pci_info(dev, "Claiming Resource %d: %pR\n",
 677					 i, r);
 678
 679			pci_claim_resource(dev, i);
 680		}
 681
 682		pci_claim_legacy_resources(dev);
 683	}
 684
 685	list_for_each_entry(child_bus, &bus->children, node)
 686		pci_claim_bus_resources(child_bus);
 687}
 688
 689struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
 690				 struct device *parent)
 691{
 692	LIST_HEAD(resources);
 693	struct device_node *node = pbm->op->dev.of_node;
 694	struct pci_bus *bus;
 695
 696	printk("PCI: Scanning PBM %pOF\n", node);
 697
 698	pci_add_resource_offset(&resources, &pbm->io_space,
 699				pbm->io_offset);
 700	pci_add_resource_offset(&resources, &pbm->mem_space,
 701				pbm->mem_offset);
 702	if (pbm->mem64_space.flags)
 703		pci_add_resource_offset(&resources, &pbm->mem64_space,
 704					pbm->mem64_offset);
 705	pbm->busn.start = pbm->pci_first_busno;
 706	pbm->busn.end	= pbm->pci_last_busno;
 707	pbm->busn.flags	= IORESOURCE_BUS;
 708	pci_add_resource(&resources, &pbm->busn);
 709	bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
 710				  pbm, &resources);
 711	if (!bus) {
 712		printk(KERN_ERR "Failed to create bus for %pOF\n", node);
 713		pci_free_resource_list(&resources);
 714		return NULL;
 715	}
 716
 717	pci_of_scan_bus(pbm, node, bus);
 718	pci_bus_register_of_sysfs(bus);
 719
 720	pci_claim_bus_resources(bus);
 721
 722	pci_bus_add_devices(bus);
 723	return bus;
 724}
 725
 726int pcibios_enable_device(struct pci_dev *dev, int mask)
 727{
 728	u16 cmd, oldcmd;
 729	int i;
 730
 731	pci_read_config_word(dev, PCI_COMMAND, &cmd);
 732	oldcmd = cmd;
 733
 734	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
 735		struct resource *res = &dev->resource[i];
 736
 737		/* Only set up the requested stuff */
 738		if (!(mask & (1<<i)))
 739			continue;
 740
 741		if (res->flags & IORESOURCE_IO)
 742			cmd |= PCI_COMMAND_IO;
 743		if (res->flags & IORESOURCE_MEM)
 744			cmd |= PCI_COMMAND_MEMORY;
 745	}
 746
 747	if (cmd != oldcmd) {
 748		pci_info(dev, "enabling device (%04x -> %04x)\n", oldcmd, cmd);
 749		pci_write_config_word(dev, PCI_COMMAND, cmd);
 750	}
 751	return 0;
 752}
 753
 754/* Platform support for /proc/bus/pci/X/Y mmap()s. */
 755
 756/* If the user uses a host-bridge as the PCI device, he may use
 757 * this to perform a raw mmap() of the I/O or MEM space behind
 758 * that controller.
 759 *
 760 * This can be useful for execution of x86 PCI bios initialization code
 761 * on a PCI card, like the xfree86 int10 stuff does.
 762 */
 763static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
 764				      enum pci_mmap_state mmap_state)
 765{
 766	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 767	unsigned long space_size, user_offset, user_size;
 768
 769	if (mmap_state == pci_mmap_io) {
 770		space_size = resource_size(&pbm->io_space);
 771	} else {
 772		space_size = resource_size(&pbm->mem_space);
 773	}
 774
 775	/* Make sure the request is in range. */
 776	user_offset = vma->vm_pgoff << PAGE_SHIFT;
 777	user_size = vma->vm_end - vma->vm_start;
 778
 779	if (user_offset >= space_size ||
 780	    (user_offset + user_size) > space_size)
 781		return -EINVAL;
 782
 783	if (mmap_state == pci_mmap_io) {
 784		vma->vm_pgoff = (pbm->io_space.start +
 785				 user_offset) >> PAGE_SHIFT;
 786	} else {
 787		vma->vm_pgoff = (pbm->mem_space.start +
 788				 user_offset) >> PAGE_SHIFT;
 789	}
 790
 791	return 0;
 792}
 793
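/* Purely illustrative, user-space sketch of the raw host-bridge mmap()
 * described above; the device path and length are assumptions and error
 * handling is omitted:
 *
 *	int fd = open("/proc/bus/pci/00/00.0", O_RDWR);
 *	ioctl(fd, PCIIOC_MMAP_IS_MEM);
 *	void *regs = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *
 * Needs <fcntl.h>, <sys/ioctl.h>, <sys/mman.h> and <linux/pci.h>.  The
 * mmap() offset ends up in vma->vm_pgoff and is range-checked against
 * the PBM's io_space/mem_space size by __pci_mmap_make_offset_bus()
 * above.
 */
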
 794/* Adjust vm_pgoff of VMA such that it is the physical page offset
 795 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 796 *
  797 * Basically, the user finds the base address of the device they wish
  798 * to mmap.  They read the 32-bit value from the config space base register,
  799 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
  800 * offset parameter of mmap() on /proc/bus/pci/XXX for that device.
 801 *
 802 * Returns negative error code on failure, zero on success.
 803 */
 804static int __pci_mmap_make_offset(struct pci_dev *pdev,
 805				  struct vm_area_struct *vma,
 806				  enum pci_mmap_state mmap_state)
 807{
 808	unsigned long user_paddr, user_size;
 809	int i, err;
 810
 811	/* First compute the physical address in vma->vm_pgoff,
 812	 * making sure the user offset is within range in the
 813	 * appropriate PCI space.
 814	 */
 815	err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
 816	if (err)
 817		return err;
 818
 819	/* If this is a mapping on a host bridge, any address
 820	 * is OK.
 821	 */
 822	if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
 823		return err;
 824
 825	/* Otherwise make sure it's in the range for one of the
 826	 * device's resources.
 827	 */
 828	user_paddr = vma->vm_pgoff << PAGE_SHIFT;
 829	user_size = vma->vm_end - vma->vm_start;
 830
 831	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
 832		struct resource *rp = &pdev->resource[i];
 833		resource_size_t aligned_end;
 834
 835		/* Active? */
 836		if (!rp->flags)
 837			continue;
 838
 839		/* Same type? */
 840		if (i == PCI_ROM_RESOURCE) {
 841			if (mmap_state != pci_mmap_mem)
 842				continue;
 843		} else {
 844			if ((mmap_state == pci_mmap_io &&
 845			     (rp->flags & IORESOURCE_IO) == 0) ||
 846			    (mmap_state == pci_mmap_mem &&
 847			     (rp->flags & IORESOURCE_MEM) == 0))
 848				continue;
 849		}
 850
 851		/* Align the resource end to the next page address.
 852		 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
 853		 * because actually we need the address of the next byte
 854		 * after rp->end.
 855		 */
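		/* For example, with 8K pages (PAGE_SHIFT 13), rp->end == 0x3fff
		 * yields aligned_end == 0x4000, one byte past the resource.
		 */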
 856		aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;
 857
 858		if ((rp->start <= user_paddr) &&
 859		    (user_paddr + user_size) <= aligned_end)
 860			break;
 861	}
 862
 863	if (i > PCI_ROM_RESOURCE)
 864		return -EINVAL;
 865
 866	return 0;
 867}
 868
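/* Illustrative user-space counterpart of the comment above
 * __pci_mmap_make_offset() (device path, BAR and length are assumptions;
 * error handling and byte-order details are omitted): read a 32-bit
 * memory BAR from config space and pass it to mmap() as the offset.
 *
 *	unsigned int bar0;
 *	int fd = open("/proc/bus/pci/01/02.0", O_RDWR);
 *
 *	pread(fd, &bar0, 4, PCI_BASE_ADDRESS_0);
 *	bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
 *	ioctl(fd, PCIIOC_MMAP_IS_MEM);
 *	mmap(NULL, 0x2000, PROT_READ | PROT_WRITE, MAP_SHARED, fd, bar0);
 *
 * In addition to the headers from the previous sketch, the
 * PCI_BASE_ADDRESS_* constants come from <linux/pci_regs.h>.
 * __pci_mmap_make_offset() above then verifies that the resulting
 * physical range lies inside one of the device's BARs or its ROM.
 */
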
 869/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 870 * device mapping.
 871 */
 872static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
 873					     enum pci_mmap_state mmap_state)
 874{
 875	/* Our io_remap_pfn_range takes care of this, do nothing.  */
 876}
 877
 878/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
  879 * for this architecture.  The region of the process to map is described by the
  880 * vm_start and vm_end members of VMA; the base physical address is found in vm_pgoff.
 881 * The pci device structure is provided so that architectures may make mapping
 882 * decisions on a per-device or per-bus basis.
 883 *
 884 * Returns a negative error code on failure, zero on success.
 885 */
 886int pci_mmap_page_range(struct pci_dev *dev, int bar,
 887			struct vm_area_struct *vma,
 888			enum pci_mmap_state mmap_state, int write_combine)
 889{
 890	int ret;
 891
 892	ret = __pci_mmap_make_offset(dev, vma, mmap_state);
 893	if (ret < 0)
 894		return ret;
 895
 896	__pci_mmap_set_pgprot(dev, vma, mmap_state);
 897
 898	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 899	ret = io_remap_pfn_range(vma, vma->vm_start,
 900				 vma->vm_pgoff,
 901				 vma->vm_end - vma->vm_start,
 902				 vma->vm_page_prot);
 903	if (ret)
 904		return ret;
 905
 906	return 0;
 907}
 908
 909#ifdef CONFIG_NUMA
 910int pcibus_to_node(struct pci_bus *pbus)
 911{
 912	struct pci_pbm_info *pbm = pbus->sysdata;
 913
 914	return pbm->numa_node;
 915}
 916EXPORT_SYMBOL(pcibus_to_node);
 917#endif
 918
 919/* Return the domain number for this pci bus */
 920
 921int pci_domain_nr(struct pci_bus *pbus)
 922{
 923	struct pci_pbm_info *pbm = pbus->sysdata;
 924	int ret;
 925
 926	if (!pbm) {
 927		ret = -ENXIO;
 928	} else {
 929		ret = pbm->index;
 930	}
 931
 932	return ret;
 933}
 934EXPORT_SYMBOL(pci_domain_nr);
 935
 936#ifdef CONFIG_PCI_MSI
 937int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 938{
 939	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 940	unsigned int irq;
 941
 942	if (!pbm->setup_msi_irq)
 943		return -EINVAL;
 944
 945	return pbm->setup_msi_irq(&irq, pdev, desc);
 946}
 947
 948void arch_teardown_msi_irq(unsigned int irq)
 949{
 950	struct msi_desc *entry = irq_get_msi_desc(irq);
 951	struct pci_dev *pdev = msi_desc_to_pci_dev(entry);
 952	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
 953
 954	if (pbm->teardown_msi_irq)
 955		pbm->teardown_msi_irq(irq, pdev);
 956}
 957#endif /* !(CONFIG_PCI_MSI) */
 958
  959/* ALI sound chips generate 31 bits of DMA address; a special register
  960 * determines what bit 31 is emitted as.
  961 */
 962int ali_sound_dma_hack(struct device *dev, u64 device_mask)
 963{
 964	struct iommu *iommu = dev->archdata.iommu;
 965	struct pci_dev *ali_isa_bridge;
 966	u8 val;
 967
 968	if (!dev_is_pci(dev))
 969		return 0;
 970
 971	if (to_pci_dev(dev)->vendor != PCI_VENDOR_ID_AL ||
 972	    to_pci_dev(dev)->device != PCI_DEVICE_ID_AL_M5451 ||
 973	    device_mask != 0x7fffffff)
 974		return 0;
 975
 976	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
 977					 PCI_DEVICE_ID_AL_M1533,
 978					 NULL);
 979
 980	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
 981	if (iommu->dma_addr_mask & 0x80000000)
 982		val |= 0x01;
 983	else
 984		val &= ~0x01;
 985	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
 986	pci_dev_put(ali_isa_bridge);
  987	return 1;
 988}
 989
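/* Illustrative only: a driver for the M5451 audio device is expected to
 * reach this helper through the sparc DMA mask handling after something
 * like
 *
 *	dma_set_mask(&pdev->dev, DMA_BIT_MASK(31));
 *
 * since DMA_BIT_MASK(31) == 0x7fffffff, the device_mask tested above.
 */
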
 990void pci_resource_to_user(const struct pci_dev *pdev, int bar,
 991			  const struct resource *rp, resource_size_t *start,
 992			  resource_size_t *end)
 993{
 994	struct pci_bus_region region;
 995
 996	/*
 997	 * "User" addresses are shown in /sys/devices/pci.../.../resource
 998	 * and /proc/bus/pci/devices and used as mmap offsets for
 999	 * /proc/bus/pci/BB/DD.F files (see proc_bus_pci_mmap()).
1000	 *
1001	 * On sparc, these are PCI bus addresses, i.e., raw BAR values.
1002	 */
1003	pcibios_resource_to_bus(pdev->bus, &region, (struct resource *) rp);
1004	*start = region.start;
1005	*end = region.end;
1006}
1007
1008void pcibios_set_master(struct pci_dev *dev)
1009{
1010	/* No special bus mastering setup handling */
1011}
1012
1013#ifdef CONFIG_PCI_IOV
1014int pcibios_add_device(struct pci_dev *dev)
1015{
1016	struct pci_dev *pdev;
1017
 1018	/* Arch-specific SR-IOV initialization:
 1019	 * copy dev_archdata from the PF to the VF.
 1020	 */
1021	if (dev->is_virtfn) {
1022		struct dev_archdata *psd;
1023
1024		pdev = dev->physfn;
1025		psd = &pdev->dev.archdata;
1026		pci_init_dev_archdata(&dev->dev.archdata, psd->iommu,
1027				      psd->stc, psd->host_controller, NULL,
1028				      psd->numa_node);
1029	}
1030	return 0;
1031}
1032#endif /* CONFIG_PCI_IOV */
1033
1034static int __init pcibios_init(void)
1035{
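	/* PCI_CACHE_LINE_SIZE is programmed in 32-bit words:
	 * 64 >> 2 == 16 dwords, i.e. a 64-byte cache line.
	 */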
1036	pci_dfl_cache_line_size = 64 >> 2;
1037	return 0;
1038}
1039subsys_initcall(pcibios_init);
1040
1041#ifdef CONFIG_SYSFS
1042
1043#define SLOT_NAME_SIZE  11  /* Max decimal digits + null in u32 */
1044
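/* On PCI Express, firmware describes the slot as a single-cell
 * "physical-slot#" property on the device node, e.g. (assumed value)
 *
 *	physical-slot# = <5>;
 *
 * which pcie_bus_slot_names() below turns into a pci_slot named "5".
 */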
1045static void pcie_bus_slot_names(struct pci_bus *pbus)
1046{
1047	struct pci_dev *pdev;
1048	struct pci_bus *bus;
1049
1050	list_for_each_entry(pdev, &pbus->devices, bus_list) {
1051		char name[SLOT_NAME_SIZE];
1052		struct pci_slot *pci_slot;
1053		const u32 *slot_num;
1054		int len;
1055
1056		slot_num = of_get_property(pdev->dev.of_node,
1057					   "physical-slot#", &len);
1058
1059		if (slot_num == NULL || len != 4)
1060			continue;
1061
1062		snprintf(name, sizeof(name), "%u", slot_num[0]);
1063		pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
1064
1065		if (IS_ERR(pci_slot))
1066			pr_err("PCI: pci_create_slot returned %ld.\n",
1067			       PTR_ERR(pci_slot));
1068	}
1069
1070	list_for_each_entry(bus, &pbus->children, node)
1071		pcie_bus_slot_names(bus);
1072}
1073
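/* Conventional PCI buses instead carry a "slot-names" property: a 32-bit
 * mask of populated slots followed by one NUL-terminated name per set
 * bit, which pci_bus_slot_names() below walks.  For example (assumed
 * contents), a mask of 0x3 followed by "PCI1\0PCI2\0" would create
 * slots 0 ("PCI1") and 1 ("PCI2").
 */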
1074static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
1075{
1076	const struct pci_slot_names {
1077		u32	slot_mask;
1078		char	names[0];
1079	} *prop;
1080	const char *sp;
1081	int len, i;
1082	u32 mask;
1083
1084	prop = of_get_property(node, "slot-names", &len);
1085	if (!prop)
1086		return;
1087
1088	mask = prop->slot_mask;
1089	sp = prop->names;
1090
1091	if (ofpci_verbose)
1092		pci_info(bus, "Making slots for [%pOF] mask[0x%02x]\n",
1093			 node, mask);
1094
1095	i = 0;
1096	while (mask) {
1097		struct pci_slot *pci_slot;
1098		u32 this_bit = 1 << i;
1099
1100		if (!(mask & this_bit)) {
1101			i++;
1102			continue;
1103		}
1104
1105		if (ofpci_verbose)
1106			pci_info(bus, "Making slot [%s]\n", sp);
1107
1108		pci_slot = pci_create_slot(bus, i, sp, NULL);
1109		if (IS_ERR(pci_slot))
1110			pci_err(bus, "pci_create_slot returned %ld\n",
1111				PTR_ERR(pci_slot));
1112
1113		sp += strlen(sp) + 1;
1114		mask &= ~this_bit;
1115		i++;
1116	}
1117}
1118
1119static int __init of_pci_slot_init(void)
1120{
1121	struct pci_bus *pbus = NULL;
1122
1123	while ((pbus = pci_find_next_bus(pbus)) != NULL) {
1124		struct device_node *node;
1125		struct pci_dev *pdev;
1126
1127		pdev = list_first_entry(&pbus->devices, struct pci_dev,
1128					bus_list);
1129
1130		if (pdev && pci_is_pcie(pdev)) {
1131			pcie_bus_slot_names(pbus);
1132		} else {
1133
1134			if (pbus->self) {
1135
1136				/* PCI->PCI bridge */
1137				node = pbus->self->dev.of_node;
1138
1139			} else {
1140				struct pci_pbm_info *pbm = pbus->sysdata;
1141
1142				/* Host PCI controller */
1143				node = pbm->op->dev.of_node;
1144			}
1145
1146			pci_bus_slot_names(node, pbus);
1147		}
1148	}
1149
1150	return 0;
1151}
1152device_initcall(of_pci_slot_init);
1153#endif