   1/*
   2 *	PCI Bus Services, see include/linux/pci.h for further explanation.
   3 *
   4 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   5 *	David Mosberger-Tang
   6 *
   7 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
  12#include <linux/init.h>
  13#include <linux/pci.h>
  14#include <linux/pm.h>
  15#include <linux/slab.h>
  16#include <linux/module.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/log2.h>
  20#include <linux/pci-aspm.h>
  21#include <linux/pm_wakeup.h>
  22#include <linux/interrupt.h>
  23#include <linux/device.h>
  24#include <linux/pm_runtime.h>
  25#include <asm-generic/pci-bridge.h>
  26#include <asm/setup.h>
  27#include "pci.h"
  28
  29const char *pci_power_names[] = {
  30	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  31};
  32EXPORT_SYMBOL_GPL(pci_power_names);
  33
  34int isa_dma_bridge_buggy;
  35EXPORT_SYMBOL(isa_dma_bridge_buggy);
  36
  37int pci_pci_problems;
  38EXPORT_SYMBOL(pci_pci_problems);
  39
  40unsigned int pci_pm_d3_delay;
  41
  42static void pci_pme_list_scan(struct work_struct *work);
  43
  44static LIST_HEAD(pci_pme_list);
  45static DEFINE_MUTEX(pci_pme_list_mutex);
  46static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  47
  48struct pci_pme_device {
  49	struct list_head list;
  50	struct pci_dev *dev;
  51};
  52
  53#define PME_TIMEOUT 1000 /* How long between PME checks */
  54
  55static void pci_dev_d3_sleep(struct pci_dev *dev)
  56{
  57	unsigned int delay = dev->d3_delay;
  58
  59	if (delay < pci_pm_d3_delay)
  60		delay = pci_pm_d3_delay;
  61
  62	msleep(delay);
  63}
  64
  65#ifdef CONFIG_PCI_DOMAINS
  66int pci_domains_supported = 1;
  67#endif
  68
  69#define DEFAULT_CARDBUS_IO_SIZE		(256)
  70#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  71/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  72unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  73unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  74
  75#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  76#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
  77/* pci=hpmemsize=nnM,hpiosize=nn can override this */
  78unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  79unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
  80
  81enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
  82
  83/*
  84 * The default CLS is used if arch didn't set CLS explicitly and not
  85 * all pci devices agree on the same value.  Arch can override either
  86 * the dfl or actual value as it sees fit.  Don't forget this is
  87 * measured in 32-bit words, not bytes.
  88 */
  89u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
  90u8 pci_cache_line_size;
  91
  92/*
  93 * If we set up a device for bus mastering, we need to check the latency
  94 * timer as certain BIOSes forget to set it properly.
  95 */
  96unsigned int pcibios_max_latency = 255;
  97
  98/* If set, the PCIe ARI capability will not be used. */
  99static bool pcie_ari_disabled;
 100
 101/**
 102 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 103 * @bus: pointer to PCI bus structure to search
 104 *
 105 * Given a PCI bus, returns the highest PCI bus number present in the set
 106 * including the given PCI bus and its list of child PCI buses.
 107 */
 108unsigned char pci_bus_max_busnr(struct pci_bus* bus)
 109{
 110	struct list_head *tmp;
 111	unsigned char max, n;
 112
 113	max = bus->subordinate;
 114	list_for_each(tmp, &bus->children) {
 115		n = pci_bus_max_busnr(pci_bus_b(tmp));
 116		if(n > max)
 117			max = n;
 118	}
 119	return max;
 120}
 121EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 122
 123#ifdef CONFIG_HAS_IOMEM
 124void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 125{
 126	/*
 127	 * Make sure the BAR is actually a memory resource, not an IO resource
 128	 */
 129	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 130		WARN_ON(1);
 131		return NULL;
 132	}
 133	return ioremap_nocache(pci_resource_start(pdev, bar),
 134				     pci_resource_len(pdev, bar));
 135}
 136EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 137#endif
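/*
 * Usage sketch for pci_ioremap_bar() (hypothetical driver code, shown
 * only for illustration): map a whole memory BAR without open-coding
 * pci_resource_start()/pci_resource_len():
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */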
 138
 139#if 0
 140/**
 141 * pci_max_busnr - returns maximum PCI bus number
 142 *
 143 * Returns the highest PCI bus number present in the system global list of
 144 * PCI buses.
 145 */
 146unsigned char __devinit
 147pci_max_busnr(void)
 148{
 149	struct pci_bus *bus = NULL;
 150	unsigned char max, n;
 151
 152	max = 0;
 153	while ((bus = pci_find_next_bus(bus)) != NULL) {
 154		n = pci_bus_max_busnr(bus);
 155		if(n > max)
 156			max = n;
 157	}
 158	return max;
 159}
 160
 161#endif  /*  0  */
 162
 163#define PCI_FIND_CAP_TTL	48
 164
 165static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 166				   u8 pos, int cap, int *ttl)
 167{
 168	u8 id;
 169
 170	while ((*ttl)--) {
 171		pci_bus_read_config_byte(bus, devfn, pos, &pos);
 172		if (pos < 0x40)
 173			break;
 174		pos &= ~3;
 175		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
 176					 &id);
 177		if (id == 0xff)
 178			break;
 179		if (id == cap)
 180			return pos;
 181		pos += PCI_CAP_LIST_NEXT;
 182	}
 183	return 0;
 184}
 185
 186static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 187			       u8 pos, int cap)
 188{
 189	int ttl = PCI_FIND_CAP_TTL;
 190
 191	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 192}
 193
 194int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 195{
 196	return __pci_find_next_cap(dev->bus, dev->devfn,
 197				   pos + PCI_CAP_LIST_NEXT, cap);
 198}
 199EXPORT_SYMBOL_GPL(pci_find_next_capability);
 200
 201static int __pci_bus_find_cap_start(struct pci_bus *bus,
 202				    unsigned int devfn, u8 hdr_type)
 203{
 204	u16 status;
 205
 206	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 207	if (!(status & PCI_STATUS_CAP_LIST))
 208		return 0;
 209
 210	switch (hdr_type) {
 211	case PCI_HEADER_TYPE_NORMAL:
 212	case PCI_HEADER_TYPE_BRIDGE:
 213		return PCI_CAPABILITY_LIST;
 214	case PCI_HEADER_TYPE_CARDBUS:
 215		return PCI_CB_CAPABILITY_LIST;
 216	default:
 217		return 0;
 218	}
 219
 220	return 0;
 221}
 222
 223/**
 224 * pci_find_capability - query for devices' capabilities 
 225 * @dev: PCI device to query
 226 * @cap: capability code
 227 *
 228 * Tell if a device supports a given PCI capability.
 229 * Returns the address of the requested capability structure within the
 230 * device's PCI configuration space or 0 in case the device does not
 231 * support it.  Possible values for @cap:
 232 *
 233 *  %PCI_CAP_ID_PM           Power Management 
 234 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port 
 235 *  %PCI_CAP_ID_VPD          Vital Product Data 
 236 *  %PCI_CAP_ID_SLOTID       Slot Identification 
 237 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 238 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap 
 239 *  %PCI_CAP_ID_PCIX         PCI-X
 240 *  %PCI_CAP_ID_EXP          PCI Express
 241 */
 242int pci_find_capability(struct pci_dev *dev, int cap)
 243{
 244	int pos;
 245
 246	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 247	if (pos)
 248		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 249
 250	return pos;
 251}
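/*
 * Usage sketch for pci_find_capability() (hypothetical caller): locate
 * the Power Management capability and read its PMC register; register
 * offsets are relative to the returned capability position.
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	if (pm) {
 *		u16 pmc;
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 *	}
 */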
 252
 253/**
 254 * pci_bus_find_capability - query for devices' capabilities 
 255 * @bus:   the PCI bus to query
 256 * @devfn: PCI device to query
 257 * @cap:   capability code
 258 *
 259 * Like pci_find_capability() but works for pci devices that do not have a
 260 * pci_dev structure set up yet. 
 261 *
 262 * Returns the address of the requested capability structure within the
 263 * device's PCI configuration space or 0 in case the device does not
 264 * support it.
 265 */
 266int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 267{
 268	int pos;
 269	u8 hdr_type;
 270
 271	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 272
 273	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 274	if (pos)
 275		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 276
 277	return pos;
 278}
 279
 280/**
 281 * pci_find_ext_capability - Find an extended capability
 282 * @dev: PCI device to query
 283 * @cap: capability code
 284 *
 285 * Returns the address of the requested extended capability structure
 286 * within the device's PCI configuration space or 0 if the device does
 287 * not support it.  Possible values for @cap:
 288 *
 289 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 290 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 291 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 292 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 293 */
 294int pci_find_ext_capability(struct pci_dev *dev, int cap)
 295{
 296	u32 header;
 297	int ttl;
 298	int pos = PCI_CFG_SPACE_SIZE;
 299
 300	/* minimum 8 bytes per capability */
 301	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 302
 303	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 304		return 0;
 305
 306	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 307		return 0;
 308
 309	/*
 310	 * If we have no capabilities, this is indicated by cap ID,
 311	 * cap version and next pointer all being 0.
 312	 */
 313	if (header == 0)
 314		return 0;
 315
 316	while (ttl-- > 0) {
 317		if (PCI_EXT_CAP_ID(header) == cap)
 318			return pos;
 319
 320		pos = PCI_EXT_CAP_NEXT(header);
 321		if (pos < PCI_CFG_SPACE_SIZE)
 322			break;
 323
 324		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 325			break;
 326	}
 327
 328	return 0;
 329}
 330EXPORT_SYMBOL_GPL(pci_find_ext_capability);
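/*
 * Usage sketch for pci_find_ext_capability() (hypothetical caller):
 * probe for the Advanced Error Reporting extended capability.
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	if (aer)
 *		dev_info(&pdev->dev, "AER capability at %#x\n", aer);
 */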
 331
 332/**
 333 * pci_bus_find_ext_capability - find an extended capability
 334 * @bus:   the PCI bus to query
 335 * @devfn: PCI device to query
 336 * @cap:   capability code
 337 *
 338 * Like pci_find_ext_capability() but works for pci devices that do not have a
 339 * pci_dev structure set up yet.
 340 *
 341 * Returns the address of the requested capability structure within the
 342 * device's PCI configuration space or 0 in case the device does not
 343 * support it.
 344 */
 345int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
 346				int cap)
 347{
 348	u32 header;
 349	int ttl;
 350	int pos = PCI_CFG_SPACE_SIZE;
 351
 352	/* minimum 8 bytes per capability */
 353	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 354
 355	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 356		return 0;
 357	if (header == 0xffffffff || header == 0)
 358		return 0;
 359
 360	while (ttl-- > 0) {
 361		if (PCI_EXT_CAP_ID(header) == cap)
 362			return pos;
 363
 364		pos = PCI_EXT_CAP_NEXT(header);
 365		if (pos < PCI_CFG_SPACE_SIZE)
 366			break;
 367
 368		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 369			break;
 370	}
 371
 372	return 0;
 373}
 374
 375static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 376{
 377	int rc, ttl = PCI_FIND_CAP_TTL;
 378	u8 cap, mask;
 379
 380	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 381		mask = HT_3BIT_CAP_MASK;
 382	else
 383		mask = HT_5BIT_CAP_MASK;
 384
 385	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 386				      PCI_CAP_ID_HT, &ttl);
 387	while (pos) {
 388		rc = pci_read_config_byte(dev, pos + 3, &cap);
 389		if (rc != PCIBIOS_SUCCESSFUL)
 390			return 0;
 391
 392		if ((cap & mask) == ht_cap)
 393			return pos;
 394
 395		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 396					      pos + PCI_CAP_LIST_NEXT,
 397					      PCI_CAP_ID_HT, &ttl);
 398	}
 399
 400	return 0;
 401}
 402/**
 403 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 404 * @dev: PCI device to query
 405 * @pos: Position from which to continue searching
 406 * @ht_cap: Hypertransport capability code
 407 *
 408 * To be used in conjunction with pci_find_ht_capability() to search for
 409 * all capabilities matching @ht_cap. @pos should always be a value returned
 410 * from pci_find_ht_capability().
 411 *
 412 * NB. To be 100% safe against broken PCI devices, the caller should take
 413 * steps to avoid an infinite loop.
 414 */
 415int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 416{
 417	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 418}
 419EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 420
 421/**
 422 * pci_find_ht_capability - query a device's Hypertransport capabilities
 423 * @dev: PCI device to query
 424 * @ht_cap: Hypertransport capability code
 425 *
 426 * Tell if a device supports a given Hypertransport capability.
 427 * Returns an address within the device's PCI configuration space
  428 * or 0 in case the device does not support the requested capability.
 429 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 430 * which has a Hypertransport capability matching @ht_cap.
 431 */
 432int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 433{
 434	int pos;
 435
 436	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 437	if (pos)
 438		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 439
 440	return pos;
 441}
 442EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 443
 444/**
 445 * pci_find_parent_resource - return resource region of parent bus of given region
 446 * @dev: PCI device structure contains resources to be searched
 447 * @res: child resource record for which parent is sought
 448 *
 449 *  For given resource region of given device, return the resource
 450 *  region of parent bus the given region is contained in or where
 451 *  it should be allocated from.
 452 */
 453struct resource *
 454pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 455{
 456	const struct pci_bus *bus = dev->bus;
 457	int i;
 458	struct resource *best = NULL, *r;
 459
 460	pci_bus_for_each_resource(bus, r, i) {
 461		if (!r)
 462			continue;
 463		if (res->start && !(res->start >= r->start && res->end <= r->end))
 464			continue;	/* Not contained */
 465		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
 466			continue;	/* Wrong type */
 467		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
 468			return r;	/* Exact match */
 469		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
 470		if (r->flags & IORESOURCE_PREFETCH)
 471			continue;
 472		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
 473		if (!best)
 474			best = r;
 475	}
 476	return best;
 477}
 478
 479/**
  480 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 481 * @dev: PCI device to have its BARs restored
 482 *
 483 * Restore the BAR values for a given device, so as to make it
 484 * accessible by its driver.
 485 */
 486static void
 487pci_restore_bars(struct pci_dev *dev)
 488{
 489	int i;
 490
 491	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 492		pci_update_resource(dev, i);
 493}
 494
 495static struct pci_platform_pm_ops *pci_platform_pm;
 496
 497int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
 498{
 499	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
 500	    || !ops->sleep_wake || !ops->can_wakeup)
 501		return -EINVAL;
 502	pci_platform_pm = ops;
 503	return 0;
 504}
 505
 506static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 507{
 508	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 509}
 510
 511static inline int platform_pci_set_power_state(struct pci_dev *dev,
 512                                                pci_power_t t)
 513{
 514	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 515}
 516
 517static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 518{
 519	return pci_platform_pm ?
 520			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 521}
 522
 523static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
 524{
 525	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
 526}
 527
 528static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 529{
 530	return pci_platform_pm ?
 531			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 532}
 533
 534static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
 535{
 536	return pci_platform_pm ?
 537			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 538}
 539
 540/**
 541 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 542 *                           given PCI device
 543 * @dev: PCI device to handle.
 544 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 545 *
 546 * RETURN VALUE:
 547 * -EINVAL if the requested state is invalid.
 548 * -EIO if device does not support PCI PM or its PM capabilities register has a
 549 * wrong version, or device doesn't support the requested state.
 550 * 0 if device already is in the requested state.
 551 * 0 if device's power state has been successfully changed.
 552 */
 553static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 554{
 555	u16 pmcsr;
 556	bool need_restore = false;
 557
 558	/* Check if we're already there */
 559	if (dev->current_state == state)
 560		return 0;
 561
 562	if (!dev->pm_cap)
 563		return -EIO;
 564
 565	if (state < PCI_D0 || state > PCI_D3hot)
 566		return -EINVAL;
 567
 568	/* Validate current state:
  569	 * Can enter D0 from any state, but we can only go deeper
 570	 * to sleep if we're already in a low power state
 571	 */
 572	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 573	    && dev->current_state > state) {
 574		dev_err(&dev->dev, "invalid power transition "
 575			"(from state %d to %d)\n", dev->current_state, state);
 576		return -EINVAL;
 577	}
 578
 579	/* check if this device supports the desired state */
 580	if ((state == PCI_D1 && !dev->d1_support)
 581	   || (state == PCI_D2 && !dev->d2_support))
 582		return -EIO;
 583
 584	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 585
 586	/* If we're (effectively) in D3, force entire word to 0.
 587	 * This doesn't affect PME_Status, disables PME_En, and
 588	 * sets PowerState to 0.
 589	 */
 590	switch (dev->current_state) {
 591	case PCI_D0:
 592	case PCI_D1:
 593	case PCI_D2:
 594		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 595		pmcsr |= state;
 596		break;
 597	case PCI_D3hot:
 598	case PCI_D3cold:
 599	case PCI_UNKNOWN: /* Boot-up */
 600		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 601		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 602			need_restore = true;
 603		/* Fall-through: force to D0 */
 604	default:
 605		pmcsr = 0;
 606		break;
 607	}
 608
 609	/* enter specified state */
 610	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 611
 612	/* Mandatory power management transition delays */
 613	/* see PCI PM 1.1 5.6.1 table 18 */
 614	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 615		pci_dev_d3_sleep(dev);
 616	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 617		udelay(PCI_PM_D2_DELAY);
 618
 619	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 620	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 621	if (dev->current_state != state && printk_ratelimit())
 622		dev_info(&dev->dev, "Refused to change power state, "
 623			"currently in D%d\n", dev->current_state);
 624
 625	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 626	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 627	 * from D3hot to D0 _may_ perform an internal reset, thereby
 628	 * going to "D0 Uninitialized" rather than "D0 Initialized".
 629	 * For example, at least some versions of the 3c905B and the
 630	 * 3c556B exhibit this behaviour.
 631	 *
 632	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
 633	 * devices in a D3hot state at boot.  Consequently, we need to
 634	 * restore at least the BARs so that the device will be
 635	 * accessible to its driver.
 636	 */
 637	if (need_restore)
 638		pci_restore_bars(dev);
 639
 640	if (dev->bus->self)
 641		pcie_aspm_pm_state_change(dev->bus->self);
 642
 643	return 0;
 644}
 645
 646/**
 647 * pci_update_current_state - Read PCI power state of given device from its
 648 *                            PCI PM registers and cache it
 649 * @dev: PCI device to handle.
 650 * @state: State to cache in case the device doesn't have the PM capability
 651 */
 652void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 653{
 654	if (dev->pm_cap) {
 655		u16 pmcsr;
 656
 657		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 658		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 659	} else {
 660		dev->current_state = state;
 661	}
 662}
 663
 664/**
 665 * pci_platform_power_transition - Use platform to change device power state
 666 * @dev: PCI device to handle.
 667 * @state: State to put the device into.
 668 */
 669static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 670{
 671	int error;
 672
 673	if (platform_pci_power_manageable(dev)) {
 674		error = platform_pci_set_power_state(dev, state);
 675		if (!error)
 676			pci_update_current_state(dev, state);
 677		/* Fall back to PCI_D0 if native PM is not supported */
 678		if (!dev->pm_cap)
 679			dev->current_state = PCI_D0;
 680	} else {
 681		error = -ENODEV;
 682		/* Fall back to PCI_D0 if native PM is not supported */
 683		if (!dev->pm_cap)
 684			dev->current_state = PCI_D0;
 685	}
 686
 687	return error;
 688}
 689
 690/**
 691 * __pci_start_power_transition - Start power transition of a PCI device
 692 * @dev: PCI device to handle.
 693 * @state: State to put the device into.
 694 */
 695static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 696{
 697	if (state == PCI_D0)
 698		pci_platform_power_transition(dev, PCI_D0);
 699}
 700
 701/**
 702 * __pci_complete_power_transition - Complete power transition of a PCI device
 703 * @dev: PCI device to handle.
 704 * @state: State to put the device into.
 705 *
 706 * This function should not be called directly by device drivers.
 707 */
 708int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 709{
 710	return state >= PCI_D0 ?
 711			pci_platform_power_transition(dev, state) : -EINVAL;
 712}
 713EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 714
 715/**
 716 * pci_set_power_state - Set the power state of a PCI device
 717 * @dev: PCI device to handle.
 718 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 719 *
 720 * Transition a device to a new power state, using the platform firmware and/or
 721 * the device's PCI PM registers.
 722 *
 723 * RETURN VALUE:
 724 * -EINVAL if the requested state is invalid.
 725 * -EIO if device does not support PCI PM or its PM capabilities register has a
 726 * wrong version, or device doesn't support the requested state.
 727 * 0 if device already is in the requested state.
 728 * 0 if device's power state has been successfully changed.
 729 */
 730int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 731{
 732	int error;
 733
 734	/* bound the state we're entering */
 735	if (state > PCI_D3hot)
 736		state = PCI_D3hot;
 737	else if (state < PCI_D0)
 738		state = PCI_D0;
 739	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
 740		/*
 741		 * If the device or the parent bridge do not support PCI PM,
 742		 * ignore the request if we're doing anything other than putting
 743		 * it into D0 (which would only happen on boot).
 744		 */
 745		return 0;
 746
 747	__pci_start_power_transition(dev, state);
 748
 749	/* This device is quirked not to be put into D3, so
 750	   don't put it in D3 */
 751	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 752		return 0;
 753
 754	error = pci_raw_set_power_state(dev, state);
 755
 756	if (!__pci_complete_power_transition(dev, state))
 757		error = 0;
 758	/*
 759	 * When aspm_policy is "powersave" this call ensures
 760	 * that ASPM is configured.
 761	 */
 762	if (!error && dev->bus->self)
 763		pcie_aspm_powersave_config_link(dev->bus->self);
 764
 765	return error;
 766}
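/*
 * Usage sketch for pci_set_power_state() (hypothetical suspend path):
 * save config space and then request D3hot; unsupported states are
 * clamped as described above.
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */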
 767
 768/**
 769 * pci_choose_state - Choose the power state of a PCI device
 770 * @dev: PCI device to be suspended
 771 * @state: target sleep state for the whole system. This is the value
  772 *	that is passed to the suspend() function.
 773 *
 774 * Returns PCI power state suitable for given device and given system
 775 * message.
 776 */
 777
 778pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 779{
 780	pci_power_t ret;
 781
 782	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
 783		return PCI_D0;
 784
 785	ret = platform_pci_choose_state(dev);
 786	if (ret != PCI_POWER_ERROR)
 787		return ret;
 788
 789	switch (state.event) {
 790	case PM_EVENT_ON:
 791		return PCI_D0;
 792	case PM_EVENT_FREEZE:
 793	case PM_EVENT_PRETHAW:
 794		/* REVISIT both freeze and pre-thaw "should" use D0 */
 795	case PM_EVENT_SUSPEND:
 796	case PM_EVENT_HIBERNATE:
 797		return PCI_D3hot;
 798	default:
 799		dev_info(&dev->dev, "unrecognized suspend event %d\n",
 800			 state.event);
 801		BUG();
 802	}
 803	return PCI_D0;
 804}
 805
 806EXPORT_SYMBOL(pci_choose_state);
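/*
 * Usage sketch for pci_choose_state() (hypothetical legacy .suspend
 * callback, where "state" is the pm_message_t argument): let the
 * platform pick the target state instead of hard-coding D3hot.
 *
 *	pci_set_power_state(pdev, pci_choose_state(pdev, state));
 */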
 807
 808#define PCI_EXP_SAVE_REGS	7
 809
 810#define pcie_cap_has_devctl(type, flags)	1
 811#define pcie_cap_has_lnkctl(type, flags)		\
 812		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 813		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 814		  type == PCI_EXP_TYPE_ENDPOINT ||	\
 815		  type == PCI_EXP_TYPE_LEG_END))
 816#define pcie_cap_has_sltctl(type, flags)		\
 817		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 818		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
 819		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
 820		   (flags & PCI_EXP_FLAGS_SLOT))))
 821#define pcie_cap_has_rtctl(type, flags)			\
 822		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 823		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 824		  type == PCI_EXP_TYPE_RC_EC))
 825#define pcie_cap_has_devctl2(type, flags)		\
 826		((flags & PCI_EXP_FLAGS_VERS) > 1)
 827#define pcie_cap_has_lnkctl2(type, flags)		\
 828		((flags & PCI_EXP_FLAGS_VERS) > 1)
 829#define pcie_cap_has_sltctl2(type, flags)		\
 830		((flags & PCI_EXP_FLAGS_VERS) > 1)
 831
 832static struct pci_cap_saved_state *pci_find_saved_cap(
 833	struct pci_dev *pci_dev, char cap)
 834{
 835	struct pci_cap_saved_state *tmp;
 836	struct hlist_node *pos;
 837
 838	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
 839		if (tmp->cap.cap_nr == cap)
 840			return tmp;
 841	}
 842	return NULL;
 843}
 844
 845static int pci_save_pcie_state(struct pci_dev *dev)
 846{
 847	int pos, i = 0;
 848	struct pci_cap_saved_state *save_state;
 849	u16 *cap;
 850	u16 flags;
 851
 852	pos = pci_pcie_cap(dev);
 853	if (!pos)
 854		return 0;
 855
 856	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 857	if (!save_state) {
 858		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 859		return -ENOMEM;
 860	}
 861	cap = (u16 *)&save_state->cap.data[0];
 862
 863	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 864
 865	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 866		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
 867	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 868		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
 869	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 870		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
 871	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 872		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
 873	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 874		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
 875	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 876		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
 877	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 878		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
 879
 880	return 0;
 881}
 882
 883static void pci_restore_pcie_state(struct pci_dev *dev)
 884{
 885	int i = 0, pos;
 886	struct pci_cap_saved_state *save_state;
 887	u16 *cap;
 888	u16 flags;
 889
 890	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 891	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 892	if (!save_state || pos <= 0)
 893		return;
 894	cap = (u16 *)&save_state->cap.data[0];
 895
 896	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 897
 898	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 899		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
 900	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 901		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
 902	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 903		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
 904	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 905		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
 906	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 907		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
 908	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 909		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
 910	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 911		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
 912}
 913
 914
 915static int pci_save_pcix_state(struct pci_dev *dev)
 916{
 917	int pos;
 918	struct pci_cap_saved_state *save_state;
 919
 920	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 921	if (pos <= 0)
 922		return 0;
 923
 924	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 925	if (!save_state) {
 926		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 927		return -ENOMEM;
 928	}
 929
 930	pci_read_config_word(dev, pos + PCI_X_CMD,
 931			     (u16 *)save_state->cap.data);
 932
 933	return 0;
 934}
 935
 936static void pci_restore_pcix_state(struct pci_dev *dev)
 937{
 938	int i = 0, pos;
 939	struct pci_cap_saved_state *save_state;
 940	u16 *cap;
 941
 942	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 943	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 944	if (!save_state || pos <= 0)
 945		return;
 946	cap = (u16 *)&save_state->cap.data[0];
 947
 948	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
 949}
 950
 951
 952/**
 953 * pci_save_state - save the PCI configuration space of a device before suspending
  954 * @dev: PCI device that we're dealing with
 955 */
 956int
 957pci_save_state(struct pci_dev *dev)
 958{
 959	int i;
 960	/* XXX: 100% dword access ok here? */
 961	for (i = 0; i < 16; i++)
 962		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 963	dev->state_saved = true;
 964	if ((i = pci_save_pcie_state(dev)) != 0)
 965		return i;
 966	if ((i = pci_save_pcix_state(dev)) != 0)
 967		return i;
 968	return 0;
 969}
 970
 971static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
 972				     u32 saved_val, int retry)
 973{
 974	u32 val;
 975
 976	pci_read_config_dword(pdev, offset, &val);
 977	if (val == saved_val)
 978		return;
 979
 980	for (;;) {
 981		dev_dbg(&pdev->dev, "restoring config space at offset "
 982			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
 983		pci_write_config_dword(pdev, offset, saved_val);
 984		if (retry-- <= 0)
 985			return;
 986
 987		pci_read_config_dword(pdev, offset, &val);
 988		if (val == saved_val)
 989			return;
 990
 991		mdelay(1);
 992	}
 993}
 994
 995static void pci_restore_config_space_range(struct pci_dev *pdev,
 996					   int start, int end, int retry)
 997{
 998	int index;
 999
1000	for (index = end; index >= start; index--)
1001		pci_restore_config_dword(pdev, 4 * index,
1002					 pdev->saved_config_space[index],
1003					 retry);
1004}
1005
1006static void pci_restore_config_space(struct pci_dev *pdev)
1007{
1008	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1009		pci_restore_config_space_range(pdev, 10, 15, 0);
1010		/* Restore BARs before the command register. */
1011		pci_restore_config_space_range(pdev, 4, 9, 10);
1012		pci_restore_config_space_range(pdev, 0, 3, 0);
1013	} else {
1014		pci_restore_config_space_range(pdev, 0, 15, 0);
1015	}
1016}
1017
1018/** 
1019 * pci_restore_state - Restore the saved state of a PCI device
 1020 * @dev: PCI device that we're dealing with
1021 */
1022void pci_restore_state(struct pci_dev *dev)
1023{
1024	if (!dev->state_saved)
1025		return;
1026
1027	/* PCI Express register must be restored first */
1028	pci_restore_pcie_state(dev);
1029	pci_restore_ats_state(dev);
1030
1031	pci_restore_config_space(dev);
1032
1033	pci_restore_pcix_state(dev);
1034	pci_restore_msi_state(dev);
1035	pci_restore_iov_state(dev);
1036
1037	dev->state_saved = false;
1038}
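/*
 * Usage sketch for pci_restore_state() (hypothetical resume path,
 * mirroring the suspend sketch above): bring the device back to D0 and
 * then restore the configuration space saved by pci_save_state().
 *
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */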
1039
1040struct pci_saved_state {
1041	u32 config_space[16];
1042	struct pci_cap_saved_data cap[0];
1043};
1044
1045/**
1046 * pci_store_saved_state - Allocate and return an opaque struct containing
 1047 *			   the device's saved state.
1048 * @dev: PCI device that we're dealing with
1049 *
 1050 * Return NULL if no state or error.
1051 */
1052struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1053{
1054	struct pci_saved_state *state;
1055	struct pci_cap_saved_state *tmp;
1056	struct pci_cap_saved_data *cap;
1057	struct hlist_node *pos;
1058	size_t size;
1059
1060	if (!dev->state_saved)
1061		return NULL;
1062
1063	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1064
1065	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1066		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1067
1068	state = kzalloc(size, GFP_KERNEL);
1069	if (!state)
1070		return NULL;
1071
1072	memcpy(state->config_space, dev->saved_config_space,
1073	       sizeof(state->config_space));
1074
1075	cap = state->cap;
1076	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1077		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1078		memcpy(cap, &tmp->cap, len);
1079		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1080	}
1081	/* Empty cap_save terminates list */
1082
1083	return state;
1084}
1085EXPORT_SYMBOL_GPL(pci_store_saved_state);
1086
1087/**
1088 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1089 * @dev: PCI device that we're dealing with
1090 * @state: Saved state returned from pci_store_saved_state()
1091 */
1092int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1093{
1094	struct pci_cap_saved_data *cap;
1095
1096	dev->state_saved = false;
1097
1098	if (!state)
1099		return 0;
1100
1101	memcpy(dev->saved_config_space, state->config_space,
1102	       sizeof(state->config_space));
1103
1104	cap = state->cap;
1105	while (cap->size) {
1106		struct pci_cap_saved_state *tmp;
1107
1108		tmp = pci_find_saved_cap(dev, cap->cap_nr);
1109		if (!tmp || tmp->cap.size != cap->size)
1110			return -EINVAL;
1111
1112		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1113		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1114		       sizeof(struct pci_cap_saved_data) + cap->size);
1115	}
1116
1117	dev->state_saved = true;
1118	return 0;
1119}
1120EXPORT_SYMBOL_GPL(pci_load_saved_state);
1121
1122/**
1123 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1124 *				   and free the memory allocated for it.
1125 * @dev: PCI device that we're dealing with
1126 * @state: Pointer to saved state returned from pci_store_saved_state()
1127 */
1128int pci_load_and_free_saved_state(struct pci_dev *dev,
1129				  struct pci_saved_state **state)
1130{
1131	int ret = pci_load_saved_state(dev, *state);
1132	kfree(*state);
1133	*state = NULL;
1134	return ret;
1135}
1136EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
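/*
 * Usage sketch for the saved-state snapshot API (hypothetical caller,
 * e.g. a device-assignment backend): capture an opaque snapshot and
 * reapply it later, for instance around a device reset.
 *
 *	struct pci_saved_state *snap;
 *
 *	pci_save_state(pdev);
 *	snap = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &snap);
 *	pci_restore_state(pdev);
 */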
1137
1138static int do_pci_enable_device(struct pci_dev *dev, int bars)
1139{
1140	int err;
1141
1142	err = pci_set_power_state(dev, PCI_D0);
1143	if (err < 0 && err != -EIO)
1144		return err;
1145	err = pcibios_enable_device(dev, bars);
1146	if (err < 0)
1147		return err;
1148	pci_fixup_device(pci_fixup_enable, dev);
1149
1150	return 0;
1151}
1152
1153/**
1154 * pci_reenable_device - Resume abandoned device
1155 * @dev: PCI device to be resumed
1156 *
1157 *  Note this function is a backend of pci_default_resume and is not supposed
1158 *  to be called by normal code, write proper resume handler and use it instead.
 1159 *  to be called by normal code, write a proper resume handler and use it instead.
1160int pci_reenable_device(struct pci_dev *dev)
1161{
1162	if (pci_is_enabled(dev))
1163		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1164	return 0;
1165}
1166
1167static int __pci_enable_device_flags(struct pci_dev *dev,
1168				     resource_size_t flags)
1169{
1170	int err;
1171	int i, bars = 0;
1172
1173	/*
1174	 * Power state could be unknown at this point, either due to a fresh
1175	 * boot or a device removal call.  So get the current power state
1176	 * so that things like MSI message writing will behave as expected
1177	 * (e.g. if the device really is in D0 at enable time).
1178	 */
1179	if (dev->pm_cap) {
1180		u16 pmcsr;
1181		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1182		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1183	}
1184
1185	if (atomic_add_return(1, &dev->enable_cnt) > 1)
1186		return 0;		/* already enabled */
1187
1188	/* only skip sriov related */
1189	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1190		if (dev->resource[i].flags & flags)
1191			bars |= (1 << i);
1192	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1193		if (dev->resource[i].flags & flags)
1194			bars |= (1 << i);
1195
1196	err = do_pci_enable_device(dev, bars);
1197	if (err < 0)
1198		atomic_dec(&dev->enable_cnt);
1199	return err;
1200}
1201
1202/**
1203 * pci_enable_device_io - Initialize a device for use with IO space
1204 * @dev: PCI device to be initialized
1205 *
1206 *  Initialize device before it's used by a driver. Ask low-level code
1207 *  to enable I/O resources. Wake up the device if it was suspended.
1208 *  Beware, this function can fail.
1209 */
1210int pci_enable_device_io(struct pci_dev *dev)
1211{
1212	return __pci_enable_device_flags(dev, IORESOURCE_IO);
1213}
1214
1215/**
1216 * pci_enable_device_mem - Initialize a device for use with Memory space
1217 * @dev: PCI device to be initialized
1218 *
1219 *  Initialize device before it's used by a driver. Ask low-level code
1220 *  to enable Memory resources. Wake up the device if it was suspended.
1221 *  Beware, this function can fail.
1222 */
1223int pci_enable_device_mem(struct pci_dev *dev)
1224{
1225	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1226}
1227
1228/**
1229 * pci_enable_device - Initialize device before it's used by a driver.
1230 * @dev: PCI device to be initialized
1231 *
1232 *  Initialize device before it's used by a driver. Ask low-level code
1233 *  to enable I/O and memory. Wake up the device if it was suspended.
1234 *  Beware, this function can fail.
1235 *
1236 *  Note we don't actually enable the device many times if we call
1237 *  this function repeatedly (we just increment the count).
1238 */
1239int pci_enable_device(struct pci_dev *dev)
1240{
1241	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1242}
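/*
 * Usage sketch for pci_enable_device() (hypothetical probe routine):
 * the usual enable sequence.  Enable calls nest, so each successful
 * enable needs a matching pci_disable_device().
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 */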
1243
1244/*
1245 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1246 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1247 * there's no need to track it separately.  pci_devres is initialized
1248 * when a device is enabled using managed PCI device enable interface.
1249 */
1250struct pci_devres {
1251	unsigned int enabled:1;
1252	unsigned int pinned:1;
1253	unsigned int orig_intx:1;
1254	unsigned int restore_intx:1;
1255	u32 region_mask;
1256};
1257
1258static void pcim_release(struct device *gendev, void *res)
1259{
1260	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1261	struct pci_devres *this = res;
1262	int i;
1263
1264	if (dev->msi_enabled)
1265		pci_disable_msi(dev);
1266	if (dev->msix_enabled)
1267		pci_disable_msix(dev);
1268
1269	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1270		if (this->region_mask & (1 << i))
1271			pci_release_region(dev, i);
1272
1273	if (this->restore_intx)
1274		pci_intx(dev, this->orig_intx);
1275
1276	if (this->enabled && !this->pinned)
1277		pci_disable_device(dev);
1278}
1279
1280static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1281{
1282	struct pci_devres *dr, *new_dr;
1283
1284	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1285	if (dr)
1286		return dr;
1287
1288	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1289	if (!new_dr)
1290		return NULL;
1291	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1292}
1293
1294static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1295{
1296	if (pci_is_managed(pdev))
1297		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1298	return NULL;
1299}
1300
1301/**
1302 * pcim_enable_device - Managed pci_enable_device()
1303 * @pdev: PCI device to be initialized
1304 *
1305 * Managed pci_enable_device().
1306 */
1307int pcim_enable_device(struct pci_dev *pdev)
1308{
1309	struct pci_devres *dr;
1310	int rc;
1311
1312	dr = get_pci_dr(pdev);
1313	if (unlikely(!dr))
1314		return -ENOMEM;
1315	if (dr->enabled)
1316		return 0;
1317
1318	rc = pci_enable_device(pdev);
1319	if (!rc) {
1320		pdev->is_managed = 1;
1321		dr->enabled = 1;
1322	}
1323	return rc;
1324}
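/*
 * Usage sketch for pcim_enable_device() (hypothetical managed probe):
 * the device is disabled automatically on driver detach, so no explicit
 * pci_disable_device() is needed in the error or remove paths.
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 */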
1325
1326/**
1327 * pcim_pin_device - Pin managed PCI device
1328 * @pdev: PCI device to pin
1329 *
1330 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1331 * driver detach.  @pdev must have been enabled with
1332 * pcim_enable_device().
1333 */
1334void pcim_pin_device(struct pci_dev *pdev)
1335{
1336	struct pci_devres *dr;
1337
1338	dr = find_pci_dr(pdev);
1339	WARN_ON(!dr || !dr->enabled);
1340	if (dr)
1341		dr->pinned = 1;
1342}
1343
1344/**
1345 * pcibios_disable_device - disable arch specific PCI resources for device dev
1346 * @dev: the PCI device to disable
1347 *
1348 * Disables architecture specific PCI resources for the device. This
1349 * is the default implementation. Architecture implementations can
1350 * override this.
1351 */
1352void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1353
1354static void do_pci_disable_device(struct pci_dev *dev)
1355{
1356	u16 pci_command;
1357
1358	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1359	if (pci_command & PCI_COMMAND_MASTER) {
1360		pci_command &= ~PCI_COMMAND_MASTER;
1361		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1362	}
1363
1364	pcibios_disable_device(dev);
1365}
1366
1367/**
1368 * pci_disable_enabled_device - Disable device without updating enable_cnt
1369 * @dev: PCI device to disable
1370 *
1371 * NOTE: This function is a backend of PCI power management routines and is
 1372 * not supposed to be called by drivers.
1373 */
1374void pci_disable_enabled_device(struct pci_dev *dev)
1375{
1376	if (pci_is_enabled(dev))
1377		do_pci_disable_device(dev);
1378}
1379
1380/**
1381 * pci_disable_device - Disable PCI device after use
1382 * @dev: PCI device to be disabled
1383 *
1384 * Signal to the system that the PCI device is not in use by the system
1385 * anymore.  This only involves disabling PCI bus-mastering, if active.
1386 *
1387 * Note we don't actually disable the device until all callers of
1388 * pci_enable_device() have called pci_disable_device().
1389 */
1390void
1391pci_disable_device(struct pci_dev *dev)
1392{
1393	struct pci_devres *dr;
1394
1395	dr = find_pci_dr(dev);
1396	if (dr)
1397		dr->enabled = 0;
1398
1399	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1400		return;
1401
1402	do_pci_disable_device(dev);
1403
1404	dev->is_busmaster = 0;
1405}
1406
1407/**
1408 * pcibios_set_pcie_reset_state - set reset state for device dev
1409 * @dev: the PCIe device reset
1410 * @state: Reset state to enter into
1411 *
1412 *
1413 * Sets the PCIe reset state for the device. This is the default
1414 * implementation. Architecture implementations can override this.
1415 */
1416int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1417							enum pcie_reset_state state)
1418{
1419	return -EINVAL;
1420}
1421
1422/**
1423 * pci_set_pcie_reset_state - set reset state for device dev
1424 * @dev: the PCIe device reset
1425 * @state: Reset state to enter into
1426 *
1427 *
1428 * Sets the PCI reset state for the device.
1429 */
1430int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1431{
1432	return pcibios_set_pcie_reset_state(dev, state);
1433}
1434
1435/**
1436 * pci_check_pme_status - Check if given device has generated PME.
1437 * @dev: Device to check.
1438 *
1439 * Check the PME status of the device and if set, clear it and clear PME enable
1440 * (if set).  Return 'true' if PME status and PME enable were both set or
1441 * 'false' otherwise.
1442 */
1443bool pci_check_pme_status(struct pci_dev *dev)
1444{
1445	int pmcsr_pos;
1446	u16 pmcsr;
1447	bool ret = false;
1448
1449	if (!dev->pm_cap)
1450		return false;
1451
1452	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1453	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1454	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1455		return false;
1456
1457	/* Clear PME status. */
1458	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1459	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1460		/* Disable PME to avoid interrupt flood. */
1461		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1462		ret = true;
1463	}
1464
1465	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1466
1467	return ret;
1468}
1469
1470/**
1471 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1472 * @dev: Device to handle.
1473 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1474 *
1475 * Check if @dev has generated PME and queue a resume request for it in that
1476 * case.
1477 */
1478static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1479{
1480	if (pme_poll_reset && dev->pme_poll)
1481		dev->pme_poll = false;
1482
1483	if (pci_check_pme_status(dev)) {
1484		pci_wakeup_event(dev);
1485		pm_request_resume(&dev->dev);
1486	}
1487	return 0;
1488}
1489
1490/**
1491 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1492 * @bus: Top bus of the subtree to walk.
1493 */
1494void pci_pme_wakeup_bus(struct pci_bus *bus)
1495{
1496	if (bus)
1497		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1498}
1499
1500/**
1501 * pci_pme_capable - check the capability of PCI device to generate PME#
1502 * @dev: PCI device to handle.
1503 * @state: PCI state from which device will issue PME#.
1504 */
1505bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1506{
1507	if (!dev->pm_cap)
1508		return false;
1509
1510	return !!(dev->pme_support & (1 << state));
1511}
1512
1513static void pci_pme_list_scan(struct work_struct *work)
1514{
1515	struct pci_pme_device *pme_dev, *n;
1516
1517	mutex_lock(&pci_pme_list_mutex);
1518	if (!list_empty(&pci_pme_list)) {
1519		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1520			if (pme_dev->dev->pme_poll) {
1521				pci_pme_wakeup(pme_dev->dev, NULL);
1522			} else {
1523				list_del(&pme_dev->list);
1524				kfree(pme_dev);
1525			}
1526		}
1527		if (!list_empty(&pci_pme_list))
1528			schedule_delayed_work(&pci_pme_work,
1529					      msecs_to_jiffies(PME_TIMEOUT));
1530	}
1531	mutex_unlock(&pci_pme_list_mutex);
1532}
1533
1534/**
1535 * pci_pme_active - enable or disable PCI device's PME# function
1536 * @dev: PCI device to handle.
1537 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1538 *
1539 * The caller must verify that the device is capable of generating PME# before
1540 * calling this function with @enable equal to 'true'.
1541 */
1542void pci_pme_active(struct pci_dev *dev, bool enable)
1543{
1544	u16 pmcsr;
1545
1546	if (!dev->pm_cap)
1547		return;
1548
1549	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1550	/* Clear PME_Status by writing 1 to it and enable PME# */
1551	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1552	if (!enable)
1553		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1554
1555	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1556
1557	/* PCI (as opposed to PCIe) PME requires that the device have
1558	   its PME# line hooked up correctly. Not all hardware vendors
1559	   do this, so the PME never gets delivered and the device
1560	   remains asleep. The easiest way around this is to
1561	   periodically walk the list of suspended devices and check
1562	   whether any have their PME flag set. The assumption is that
1563	   we'll wake up often enough anyway that this won't be a huge
1564	   hit, and the power savings from the devices will still be a
1565	   win. */
1566
1567	if (dev->pme_poll) {
1568		struct pci_pme_device *pme_dev;
1569		if (enable) {
1570			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1571					  GFP_KERNEL);
1572			if (!pme_dev)
1573				goto out;
1574			pme_dev->dev = dev;
1575			mutex_lock(&pci_pme_list_mutex);
1576			list_add(&pme_dev->list, &pci_pme_list);
1577			if (list_is_singular(&pci_pme_list))
1578				schedule_delayed_work(&pci_pme_work,
1579						      msecs_to_jiffies(PME_TIMEOUT));
1580			mutex_unlock(&pci_pme_list_mutex);
1581		} else {
1582			mutex_lock(&pci_pme_list_mutex);
1583			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1584				if (pme_dev->dev == dev) {
1585					list_del(&pme_dev->list);
1586					kfree(pme_dev);
1587					break;
1588				}
1589			}
1590			mutex_unlock(&pci_pme_list_mutex);
1591		}
1592	}
1593
1594out:
1595	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1596}
1597
1598/**
1599 * __pci_enable_wake - enable PCI device as wakeup event source
1600 * @dev: PCI device affected
1601 * @state: PCI state from which device will issue wakeup events
1602 * @runtime: True if the events are to be generated at run time
1603 * @enable: True to enable event generation; false to disable
1604 *
1605 * This enables the device as a wakeup event source, or disables it.
 1606 * When such events involve platform-specific hooks, those hooks are
1607 * called automatically by this routine.
1608 *
1609 * Devices with legacy power management (no standard PCI PM capabilities)
1610 * always require such platform hooks.
1611 *
1612 * RETURN VALUE:
1613 * 0 is returned on success
1614 * -EINVAL is returned if device is not supposed to wake up the system
1615 * Error code depending on the platform is returned if both the platform and
1616 * the native mechanism fail to enable the generation of wake-up events
1617 */
1618int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1619		      bool runtime, bool enable)
1620{
1621	int ret = 0;
1622
1623	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1624		return -EINVAL;
1625
1626	/* Don't do the same thing twice in a row for one device. */
1627	if (!!enable == !!dev->wakeup_prepared)
1628		return 0;
1629
1630	/*
1631	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1632	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1633	 * enable.  To disable wake-up we call the platform first, for symmetry.
1634	 */
1635
1636	if (enable) {
1637		int error;
1638
1639		if (pci_pme_capable(dev, state))
1640			pci_pme_active(dev, true);
1641		else
1642			ret = 1;
1643		error = runtime ? platform_pci_run_wake(dev, true) :
1644					platform_pci_sleep_wake(dev, true);
1645		if (ret)
1646			ret = error;
1647		if (!ret)
1648			dev->wakeup_prepared = true;
1649	} else {
1650		if (runtime)
1651			platform_pci_run_wake(dev, false);
1652		else
1653			platform_pci_sleep_wake(dev, false);
1654		pci_pme_active(dev, false);
1655		dev->wakeup_prepared = false;
1656	}
1657
1658	return ret;
1659}
1660EXPORT_SYMBOL(__pci_enable_wake);
1661
1662/**
1663 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1664 * @dev: PCI device to prepare
1665 * @enable: True to enable wake-up event generation; false to disable
1666 *
1667 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1668 * and this function allows them to set that up cleanly - pci_enable_wake()
1669 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1670 * ordering constraints.
1671 *
1672 * This function only returns error code if the device is not capable of
1673 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1674 * enable wake-up power for it.
1675 */
1676int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1677{
1678	return pci_pme_capable(dev, PCI_D3cold) ?
1679			pci_enable_wake(dev, PCI_D3cold, enable) :
1680			pci_enable_wake(dev, PCI_D3hot, enable);
1681}
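/*
 * Usage sketch for pci_wake_from_d3() (hypothetical suspend path of a
 * wake-capable device, e.g. a NIC with Wake-on-LAN enabled): arm
 * wake-up before entering a low-power state.
 *
 *	pci_wake_from_d3(pdev, true);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */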
1682
1683/**
1684 * pci_target_state - find an appropriate low power state for a given PCI dev
1685 * @dev: PCI device
1686 *
1687 * Use underlying platform code to find a supported low power state for @dev.
1688 * If the platform can't manage @dev, return the deepest state from which it
1689 * can generate wake events, based on any available PME info.
1690 */
1691pci_power_t pci_target_state(struct pci_dev *dev)
1692{
1693	pci_power_t target_state = PCI_D3hot;
1694
1695	if (platform_pci_power_manageable(dev)) {
1696		/*
1697		 * Call the platform to choose the target state of the device
1698		 * and enable wake-up from this state if supported.
1699		 */
1700		pci_power_t state = platform_pci_choose_state(dev);
1701
1702		switch (state) {
1703		case PCI_POWER_ERROR:
1704		case PCI_UNKNOWN:
1705			break;
1706		case PCI_D1:
1707		case PCI_D2:
1708			if (pci_no_d1d2(dev))
1709				break;
1710		default:
1711			target_state = state;
1712		}
1713	} else if (!dev->pm_cap) {
1714		target_state = PCI_D0;
1715	} else if (device_may_wakeup(&dev->dev)) {
1716		/*
1717		 * Find the deepest state from which the device can generate
1718		 * wake-up events, make it the target state and enable device
1719		 * to generate PME#.
1720		 */
1721		if (dev->pme_support) {
1722			while (target_state
1723			      && !(dev->pme_support & (1 << target_state)))
1724				target_state--;
1725		}
1726	}
1727
1728	return target_state;
1729}
1730
1731/**
1732 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1733 * @dev: Device to handle.
1734 *
1735 * Choose the power state appropriate for the device depending on whether
1736 * it can wake up the system and/or is power manageable by the platform
1737 * (PCI_D3hot is the default) and put the device into that state.
1738 */
1739int pci_prepare_to_sleep(struct pci_dev *dev)
1740{
1741	pci_power_t target_state = pci_target_state(dev);
1742	int error;
1743
1744	if (target_state == PCI_POWER_ERROR)
1745		return -EIO;
1746
1747	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1748
1749	error = pci_set_power_state(dev, target_state);
1750
1751	if (error)
1752		pci_enable_wake(dev, target_state, false);
1753
1754	return error;
1755}
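/*
 * Illustrative sketch (not part of the original file): a driver with no
 * special suspend requirements can typically get by with the two calls
 * below in its ->suspend() hook (error handling omitted):
 *
 *	pci_save_state(pdev);
 *	pci_prepare_to_sleep(pdev);
 */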
1756
1757/**
1758 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1759 * @dev: Device to handle.
1760 *
1761 * Disable device's system wake-up capability and put it into D0.
1762 */
1763int pci_back_from_sleep(struct pci_dev *dev)
1764{
1765	pci_enable_wake(dev, PCI_D0, false);
1766	return pci_set_power_state(dev, PCI_D0);
1767}
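/*
 * Illustrative sketch (not part of the original file): the matching
 * ->resume() path usually undoes the above:
 *
 *	pci_back_from_sleep(pdev);
 *	pci_restore_state(pdev);
 */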
1768
1769/**
1770 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1771 * @dev: PCI device being suspended.
1772 *
1773 * Prepare @dev to generate wake-up events at run time and put it into a low
1774 * power state.
1775 */
1776int pci_finish_runtime_suspend(struct pci_dev *dev)
1777{
1778	pci_power_t target_state = pci_target_state(dev);
1779	int error;
1780
1781	if (target_state == PCI_POWER_ERROR)
1782		return -EIO;
1783
1784	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1785
1786	error = pci_set_power_state(dev, target_state);
1787
1788	if (error)
1789		__pci_enable_wake(dev, target_state, true, false);
1790
1791	return error;
1792}
1793
1794/**
1795 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1796 * @dev: Device to check.
1797 *
1798 * Return true if the device itself is capable of generating wake-up events
1799 * (through the platform or using the native PCIe PME) or if the device supports
1800 * PME and one of its upstream bridges can generate wake-up events.
1801 */
1802bool pci_dev_run_wake(struct pci_dev *dev)
1803{
1804	struct pci_bus *bus = dev->bus;
1805
1806	if (device_run_wake(&dev->dev))
1807		return true;
1808
1809	if (!dev->pme_support)
1810		return false;
1811
1812	while (bus->parent) {
1813		struct pci_dev *bridge = bus->self;
1814
1815		if (device_run_wake(&bridge->dev))
1816			return true;
1817
1818		bus = bus->parent;
1819	}
1820
1821	/* We have reached the root bus. */
1822	if (bus->bridge)
1823		return device_run_wake(bus->bridge);
1824
1825	return false;
1826}
1827EXPORT_SYMBOL_GPL(pci_dev_run_wake);
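/*
 * Illustrative sketch (not part of the original file): some drivers keep
 * the device from runtime-suspending when no run-time wake-up is possible,
 * e.g. in their probe path:
 *
 *	if (!pci_dev_run_wake(pdev))
 *		pm_runtime_get_noresume(&pdev->dev);
 */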
1828
1829/**
1830 * pci_pm_init - Initialize PM functions of given PCI device
1831 * @dev: PCI device to handle.
1832 */
1833void pci_pm_init(struct pci_dev *dev)
1834{
1835	int pm;
1836	u16 pmc;
1837
1838	pm_runtime_forbid(&dev->dev);
1839	device_enable_async_suspend(&dev->dev);
1840	dev->wakeup_prepared = false;
1841
1842	dev->pm_cap = 0;
1843
1844	/* find PCI PM capability in list */
1845	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1846	if (!pm)
1847		return;
1848	/* Check device's ability to generate PME# */
1849	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1850
1851	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1852		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1853			pmc & PCI_PM_CAP_VER_MASK);
1854		return;
1855	}
1856
1857	dev->pm_cap = pm;
1858	dev->d3_delay = PCI_PM_D3_WAIT;
1859
1860	dev->d1_support = false;
1861	dev->d2_support = false;
1862	if (!pci_no_d1d2(dev)) {
1863		if (pmc & PCI_PM_CAP_D1)
1864			dev->d1_support = true;
1865		if (pmc & PCI_PM_CAP_D2)
1866			dev->d2_support = true;
1867
1868		if (dev->d1_support || dev->d2_support)
1869			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1870				   dev->d1_support ? " D1" : "",
1871				   dev->d2_support ? " D2" : "");
1872	}
1873
1874	pmc &= PCI_PM_CAP_PME_MASK;
1875	if (pmc) {
1876		dev_printk(KERN_DEBUG, &dev->dev,
1877			 "PME# supported from%s%s%s%s%s\n",
1878			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1879			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1880			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1881			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1882			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1883		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1884		dev->pme_poll = true;
1885		/*
1886		 * Make device's PM flags reflect the wake-up capability, but
1887		 * let the user space enable it to wake up the system as needed.
1888		 */
1889		device_set_wakeup_capable(&dev->dev, true);
1890		/* Disable the PME# generation functionality */
1891		pci_pme_active(dev, false);
1892	} else {
1893		dev->pme_support = 0;
1894	}
1895}
1896
1897/**
1898 * platform_pci_wakeup_init - init platform wakeup if present
1899 * @dev: PCI device
1900 *
1901 * Some devices don't have PCI PM caps but can still generate wakeup
1902 * events through platform methods (like ACPI events).  If @dev supports
1903 * platform wakeup events, set the device flag to indicate as much.  This
1904 * may be redundant if the device also supports PCI PM caps, but double
1905 * initialization should be safe in that case.
1906 */
1907void platform_pci_wakeup_init(struct pci_dev *dev)
1908{
1909	if (!platform_pci_can_wakeup(dev))
1910		return;
1911
1912	device_set_wakeup_capable(&dev->dev, true);
1913	platform_pci_sleep_wake(dev, false);
1914}
1915
1916static void pci_add_saved_cap(struct pci_dev *pci_dev,
1917	struct pci_cap_saved_state *new_cap)
1918{
1919	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1920}
1921
1922/**
1923 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1924 * @dev: the PCI device
1925 * @cap: the capability to allocate the buffer for
1926 * @size: requested size of the buffer
1927 */
1928static int pci_add_cap_save_buffer(
1929	struct pci_dev *dev, char cap, unsigned int size)
1930{
1931	int pos;
1932	struct pci_cap_saved_state *save_state;
1933
1934	pos = pci_find_capability(dev, cap);
1935	if (pos <= 0)
1936		return 0;
1937
1938	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1939	if (!save_state)
1940		return -ENOMEM;
1941
1942	save_state->cap.cap_nr = cap;
1943	save_state->cap.size = size;
1944	pci_add_saved_cap(dev, save_state);
1945
1946	return 0;
1947}
1948
1949/**
1950 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1951 * @dev: the PCI device
1952 */
1953void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1954{
1955	int error;
1956
1957	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1958					PCI_EXP_SAVE_REGS * sizeof(u16));
1959	if (error)
1960		dev_err(&dev->dev,
1961			"unable to preallocate PCI Express save buffer\n");
1962
1963	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1964	if (error)
1965		dev_err(&dev->dev,
1966			"unable to preallocate PCI-X save buffer\n");
1967}
1968
1969void pci_free_cap_save_buffers(struct pci_dev *dev)
1970{
1971	struct pci_cap_saved_state *tmp;
1972	struct hlist_node *pos, *n;
1973
1974	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1975		kfree(tmp);
1976}
1977
1978/**
1979 * pci_enable_ari - enable ARI forwarding if hardware supports it
1980 * @dev: the PCI device
1981 */
1982void pci_enable_ari(struct pci_dev *dev)
1983{
1984	int pos;
1985	u32 cap;
1986	u16 flags, ctrl;
1987	struct pci_dev *bridge;
1988
1989	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
1990		return;
1991
1992	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1993	if (!pos)
1994		return;
1995
1996	bridge = dev->bus->self;
1997	if (!bridge || !pci_is_pcie(bridge))
1998		return;
1999
2000	pos = pci_pcie_cap(bridge);
2001	if (!pos)
2002		return;
2003
2004	/* ARI is a PCIe v2 feature */
2005	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2006	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2007		return;
2008
2009	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
2010	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2011		return;
2012
2013	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
2014	ctrl |= PCI_EXP_DEVCTL2_ARI;
2015	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
2016
2017	bridge->ari_enabled = 1;
2018}
2019
2020/**
2021 * pci_enable_ido - enable ID-based ordering on a device
2022 * @dev: the PCI device
2023 * @type: which types of IDO to enable
2024 *
2025 * Enable ID-based ordering on @dev.  @type can contain the bits
2026 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2027 * which types of transactions are allowed to be re-ordered.
2028 */
2029void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2030{
2031	int pos;
2032	u16 ctrl;
2033
2034	pos = pci_pcie_cap(dev);
2035	if (!pos)
2036		return;
2037
2038	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2039	if (type & PCI_EXP_IDO_REQUEST)
2040		ctrl |= PCI_EXP_IDO_REQ_EN;
2041	if (type & PCI_EXP_IDO_COMPLETION)
2042		ctrl |= PCI_EXP_IDO_CMP_EN;
2043	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2044}
2045EXPORT_SYMBOL(pci_enable_ido);
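/*
 * Illustrative sketch (not part of the original file): a driver whose
 * device tolerates relaxed ordering of both request and completion TLPs
 * could opt in with:
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */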
2046
2047/**
2048 * pci_disable_ido - disable ID-based ordering on a device
2049 * @dev: the PCI device
2050 * @type: which types of IDO to disable
2051 */
2052void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2053{
2054	int pos;
2055	u16 ctrl;
2056
2057	if (!pci_is_pcie(dev))
2058		return;
2059
2060	pos = pci_pcie_cap(dev);
2061	if (!pos)
2062		return;
2063
2064	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2065	if (type & PCI_EXP_IDO_REQUEST)
2066		ctrl &= ~PCI_EXP_IDO_REQ_EN;
2067	if (type & PCI_EXP_IDO_COMPLETION)
2068		ctrl &= ~PCI_EXP_IDO_CMP_EN;
2069	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2070}
2071EXPORT_SYMBOL(pci_disable_ido);
2072
2073/**
2074 * pci_enable_obff - enable optimized buffer flush/fill
2075 * @dev: PCI device
2076 * @type: type of signaling to use
2077 *
2078 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2079 * signaling if possible, falling back to message signaling only if
2080 * WAKE# isn't supported.  @type should indicate whether the PCIe link
2081 * be brought out of L0s or L1 to send the message.  It should be either
2082 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
2083 *
2084 * If your device can benefit from receiving all messages, even at the
2085 * power cost of bringing the link back up from a low power state, use
2086 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2087 * preferred type).
2088 *
2089 * RETURNS:
2090 * Zero on success, appropriate error number on failure.
2091 */
2092int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2093{
2094	int pos;
2095	u32 cap;
2096	u16 ctrl;
2097	int ret;
2098
2099	if (!pci_is_pcie(dev))
2100		return -ENOTSUPP;
2101
2102	pos = pci_pcie_cap(dev);
2103	if (!pos)
2104		return -ENOTSUPP;
2105
2106	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2107	if (!(cap & PCI_EXP_OBFF_MASK))
2108		return -ENOTSUPP; /* no OBFF support at all */
2109
2110	/* Make sure the topology supports OBFF as well */
2111	if (dev->bus) {
2112		ret = pci_enable_obff(dev->bus->self, type);
2113		if (ret)
2114			return ret;
2115	}
2116
2117	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2118	if (cap & PCI_EXP_OBFF_WAKE)
2119		ctrl |= PCI_EXP_OBFF_WAKE_EN;
2120	else {
2121		switch (type) {
2122		case PCI_EXP_OBFF_SIGNAL_L0:
2123			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2124				ctrl |= PCI_EXP_OBFF_MSGA_EN;
2125			break;
2126		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2127			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2128			ctrl |= PCI_EXP_OBFF_MSGB_EN;
2129			break;
2130		default:
2131			WARN(1, "bad OBFF signal type\n");
2132			return -ENOTSUPP;
2133		}
2134	}
2135	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2136
2137	return 0;
2138}
2139EXPORT_SYMBOL(pci_enable_obff);
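/*
 * Illustrative sketch (not part of the original file): a latency-tolerant
 * device would normally ask for the L0-only variant and simply note the
 * failure if OBFF is unavailable somewhere in its path:
 *
 *	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
 *		dev_dbg(&pdev->dev, "OBFF not available\n");
 */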
2140
2141/**
2142 * pci_disable_obff - disable optimized buffer flush/fill
2143 * @dev: PCI device
2144 *
2145 * Disable OBFF on @dev.
2146 */
2147void pci_disable_obff(struct pci_dev *dev)
2148{
2149	int pos;
2150	u16 ctrl;
2151
2152	if (!pci_is_pcie(dev))
2153		return;
2154
2155	pos = pci_pcie_cap(dev);
2156	if (!pos)
2157		return;
2158
2159	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2160	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2161	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2162}
2163EXPORT_SYMBOL(pci_disable_obff);
2164
2165/**
2166 * pci_ltr_supported - check whether a device supports LTR
2167 * @dev: PCI device
2168 *
2169 * RETURNS:
2170 * True if @dev supports latency tolerance reporting, false otherwise.
2171 */
2172bool pci_ltr_supported(struct pci_dev *dev)
2173{
2174	int pos;
2175	u32 cap;
2176
2177	if (!pci_is_pcie(dev))
2178		return false;
2179
2180	pos = pci_pcie_cap(dev);
2181	if (!pos)
2182		return false;
2183
2184	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2185
2186	return cap & PCI_EXP_DEVCAP2_LTR;
2187}
2188EXPORT_SYMBOL(pci_ltr_supported);
2189
2190/**
2191 * pci_enable_ltr - enable latency tolerance reporting
2192 * @dev: PCI device
2193 *
2194 * Enable LTR on @dev if possible, which means enabling it first on
2195 * upstream ports.
2196 *
2197 * RETURNS:
2198 * Zero on success, errno on failure.
2199 */
2200int pci_enable_ltr(struct pci_dev *dev)
2201{
2202	int pos;
2203	u16 ctrl;
2204	int ret;
2205
2206	if (!pci_ltr_supported(dev))
2207		return -ENOTSUPP;
2208
2209	pos = pci_pcie_cap(dev);
2210	if (!pos)
2211		return -ENOTSUPP;
2212
2213	/* Only primary function can enable/disable LTR */
2214	if (PCI_FUNC(dev->devfn) != 0)
2215		return -EINVAL;
2216
2217	/* Enable upstream ports first */
2218	if (dev->bus) {
2219		ret = pci_enable_ltr(dev->bus->self);
2220		if (ret)
2221			return ret;
2222	}
2223
2224	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2225	ctrl |= PCI_EXP_LTR_EN;
2226	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2227
2228	return 0;
2229}
2230EXPORT_SYMBOL(pci_enable_ltr);
2231
2232/**
2233 * pci_disable_ltr - disable latency tolerance reporting
2234 * @dev: PCI device
2235 */
2236void pci_disable_ltr(struct pci_dev *dev)
2237{
2238	int pos;
2239	u16 ctrl;
2240
2241	if (!pci_ltr_supported(dev))
2242		return;
2243
2244	pos = pci_pcie_cap(dev);
2245	if (!pos)
2246		return;
2247
2248	/* Only primary function can enable/disable LTR */
2249	if (PCI_FUNC(dev->devfn) != 0)
2250		return;
2251
2252	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2253	ctrl &= ~PCI_EXP_LTR_EN;
2254	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2255}
2256EXPORT_SYMBOL(pci_disable_ltr);
2257
2258static int __pci_ltr_scale(int *val)
2259{
2260	int scale = 0;
2261
2262	while (*val > 1023) {
2263		*val = (*val + 31) / 32;
2264		scale++;
2265	}
2266	return scale;
2267}
2268
2269/**
2270 * pci_set_ltr - set LTR latency values
2271 * @dev: PCI device
2272 * @snoop_lat_ns: snoop latency in nanoseconds
2273 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2274 *
2275 * Figure out the scale and set the LTR values accordingly.
2276 */
2277int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2278{
2279	int pos, ret, snoop_scale, nosnoop_scale;
2280	u16 val;
2281
2282	if (!pci_ltr_supported(dev))
2283		return -ENOTSUPP;
2284
2285	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2286	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2287
2288	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2289	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2290		return -EINVAL;
2291
2292	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2293	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2294		return -EINVAL;
2295
2296	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2297	if (!pos)
2298		return -ENOTSUPP;
2299
2300	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2301	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2302	if (ret != 4)
2303		return -EIO;
2304
2305	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2306	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2307	if (ret != 4)
2308		return -EIO;
2309
2310	return 0;
2311}
2312EXPORT_SYMBOL(pci_set_ltr);
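/*
 * Illustrative sketch (not part of the original file): once LTR is
 * enabled, a driver can advertise how much latency its device tolerates,
 * e.g. 3 us for snooped and 10 us for non-snooped traffic (the values
 * are hypothetical and device specific):
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 3000, 10000);
 */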
2313
2314static int pci_acs_enable;
2315
2316/**
2317 * pci_request_acs - ask for ACS to be enabled if supported
2318 */
2319void pci_request_acs(void)
2320{
2321	pci_acs_enable = 1;
2322}
2323
2324/**
2325 * pci_enable_acs - enable ACS if hardware supports it
2326 * @dev: the PCI device
2327 */
2328void pci_enable_acs(struct pci_dev *dev)
2329{
2330	int pos;
2331	u16 cap;
2332	u16 ctrl;
2333
2334	if (!pci_acs_enable)
2335		return;
2336
2337	if (!pci_is_pcie(dev))
2338		return;
2339
2340	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2341	if (!pos)
2342		return;
2343
2344	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2345	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2346
2347	/* Source Validation */
2348	ctrl |= (cap & PCI_ACS_SV);
2349
2350	/* P2P Request Redirect */
2351	ctrl |= (cap & PCI_ACS_RR);
2352
2353	/* P2P Completion Redirect */
2354	ctrl |= (cap & PCI_ACS_CR);
2355
2356	/* Upstream Forwarding */
2357	ctrl |= (cap & PCI_ACS_UF);
2358
2359	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2360}
2361
2362/**
2363 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2364 * @dev: the PCI device
2365 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2366 *
2367 * Perform INTx swizzling for a device behind one level of bridge.  This is
2368 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2369 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2370 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2371 * the PCI Express Base Specification, Revision 2.1)
2372 */
2373u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2374{
2375	int slot;
2376
2377	if (pci_ari_enabled(dev->bus))
2378		slot = 0;
2379	else
2380		slot = PCI_SLOT(dev->devfn);
2381
2382	return (((pin - 1) + slot) % 4) + 1;
2383}
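/*
 * Worked example of the swizzle above: a device in slot 2 asserting INTB
 * (pin == 2) maps to (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD on the
 * parent bridge.
 */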
2384
2385int
2386pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2387{
2388	u8 pin;
2389
2390	pin = dev->pin;
2391	if (!pin)
2392		return -1;
2393
2394	while (!pci_is_root_bus(dev->bus)) {
2395		pin = pci_swizzle_interrupt_pin(dev, pin);
2396		dev = dev->bus->self;
2397	}
2398	*bridge = dev;
2399	return pin;
2400}
2401
2402/**
2403 * pci_common_swizzle - swizzle INTx all the way to root bridge
2404 * @dev: the PCI device
2405 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2406 *
2407 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2408 * bridges all the way up to a PCI root bus.
2409 */
2410u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2411{
2412	u8 pin = *pinp;
2413
2414	while (!pci_is_root_bus(dev->bus)) {
2415		pin = pci_swizzle_interrupt_pin(dev, pin);
2416		dev = dev->bus->self;
2417	}
2418	*pinp = pin;
2419	return PCI_SLOT(dev->devfn);
2420}
2421
2422/**
2423 *	pci_release_region - Release a PCI BAR
2424 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2425 *	@bar: BAR to release
2426 *
2427 *	Releases the PCI I/O and memory resources previously reserved by a
2428 *	successful call to pci_request_region.  Call this function only
2429 *	after all use of the PCI regions has ceased.
2430 */
2431void pci_release_region(struct pci_dev *pdev, int bar)
2432{
2433	struct pci_devres *dr;
2434
2435	if (pci_resource_len(pdev, bar) == 0)
2436		return;
2437	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2438		release_region(pci_resource_start(pdev, bar),
2439				pci_resource_len(pdev, bar));
2440	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2441		release_mem_region(pci_resource_start(pdev, bar),
2442				pci_resource_len(pdev, bar));
2443
2444	dr = find_pci_dr(pdev);
2445	if (dr)
2446		dr->region_mask &= ~(1 << bar);
2447}
2448
2449/**
2450 *	__pci_request_region - Reserve PCI I/O and memory resource
2451 *	@pdev: PCI device whose resources are to be reserved
2452 *	@bar: BAR to be reserved
2453 *	@res_name: Name to be associated with resource.
2454 *	@exclusive: whether the region access is exclusive or not
2455 *
2456 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2457 *	being reserved by owner @res_name.  Do not access any
2458 *	address inside the PCI regions unless this call returns
2459 *	successfully.
2460 *
2461 *	If @exclusive is set, then the region is marked so that userspace
2462 *	is explicitly not allowed to map the resource via /dev/mem or
2463 * 	sysfs MMIO access.
2464 *
2465 *	Returns 0 on success, or %EBUSY on error.  A warning
2466 *	message is also printed on failure.
2467 */
2468static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2469									int exclusive)
2470{
2471	struct pci_devres *dr;
2472
2473	if (pci_resource_len(pdev, bar) == 0)
2474		return 0;
2475		
2476	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2477		if (!request_region(pci_resource_start(pdev, bar),
2478			    pci_resource_len(pdev, bar), res_name))
2479			goto err_out;
2480	}
2481	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2482		if (!__request_mem_region(pci_resource_start(pdev, bar),
2483					pci_resource_len(pdev, bar), res_name,
2484					exclusive))
2485			goto err_out;
2486	}
2487
2488	dr = find_pci_dr(pdev);
2489	if (dr)
2490		dr->region_mask |= 1 << bar;
2491
2492	return 0;
2493
2494err_out:
2495	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2496		 &pdev->resource[bar]);
2497	return -EBUSY;
2498}
2499
2500/**
2501 *	pci_request_region - Reserve PCI I/O and memory resource
2502 *	@pdev: PCI device whose resources are to be reserved
2503 *	@bar: BAR to be reserved
2504 *	@res_name: Name to be associated with resource
2505 *
2506 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2507 *	being reserved by owner @res_name.  Do not access any
2508 *	address inside the PCI regions unless this call returns
2509 *	successfully.
2510 *
2511 *	Returns 0 on success, or %EBUSY on error.  A warning
2512 *	message is also printed on failure.
2513 */
2514int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2515{
2516	return __pci_request_region(pdev, bar, res_name, 0);
2517}
2518
2519/**
2520 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2521 *	@pdev: PCI device whose resources are to be reserved
2522 *	@bar: BAR to be reserved
2523 *	@res_name: Name to be associated with resource.
2524 *
2525 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2526 *	being reserved by owner @res_name.  Do not access any
2527 *	address inside the PCI regions unless this call returns
2528 *	successfully.
2529 *
2530 *	Returns 0 on success, or %EBUSY on error.  A warning
2531 *	message is also printed on failure.
2532 *
2533 *	The key difference that _exclusive makes is that userspace is
2534 *	explicitly not allowed to map the resource via /dev/mem or
2535 * 	sysfs.
2536 */
2537int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2538{
2539	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2540}
2541/**
2542 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2543 * @pdev: PCI device whose resources were previously reserved
2544 * @bars: Bitmask of BARs to be released
2545 *
2546 * Release selected PCI I/O and memory resources previously reserved.
2547 * Call this function only after all use of the PCI regions has ceased.
2548 */
2549void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2550{
2551	int i;
2552
2553	for (i = 0; i < 6; i++)
2554		if (bars & (1 << i))
2555			pci_release_region(pdev, i);
2556}
2557
2558int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2559				 const char *res_name, int excl)
2560{
2561	int i;
2562
2563	for (i = 0; i < 6; i++)
2564		if (bars & (1 << i))
2565			if (__pci_request_region(pdev, i, res_name, excl))
2566				goto err_out;
2567	return 0;
2568
2569err_out:
2570	while (--i >= 0)
2571		if (bars & (1 << i))
2572			pci_release_region(pdev, i);
2573
2574	return -EBUSY;
2575}
2576
2577
2578/**
2579 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2580 * @pdev: PCI device whose resources are to be reserved
2581 * @bars: Bitmask of BARs to be requested
2582 * @res_name: Name to be associated with resource
2583 */
2584int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2585				 const char *res_name)
2586{
2587	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2588}
2589
2590int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2591				 int bars, const char *res_name)
2592{
2593	return __pci_request_selected_regions(pdev, bars, res_name,
2594			IORESOURCE_EXCLUSIVE);
2595}
2596
2597/**
2598 *	pci_release_regions - Release reserved PCI I/O and memory resources
2599 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2600 *
2601 *	Releases all PCI I/O and memory resources previously reserved by a
2602 *	successful call to pci_request_regions.  Call this function only
2603 *	after all use of the PCI regions has ceased.
2604 */
2605
2606void pci_release_regions(struct pci_dev *pdev)
2607{
2608	pci_release_selected_regions(pdev, (1 << 6) - 1);
2609}
2610
2611/**
2612 *	pci_request_regions - Reserve PCI I/O and memory resources
2613 *	@pdev: PCI device whose resources are to be reserved
2614 *	@res_name: Name to be associated with resource.
2615 *
2616 *	Mark all PCI regions associated with PCI device @pdev as
2617 *	being reserved by owner @res_name.  Do not access any
2618 *	address inside the PCI regions unless this call returns
2619 *	successfully.
2620 *
2621 *	Returns 0 on success, or %EBUSY on error.  A warning
2622 *	message is also printed on failure.
2623 */
2624int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2625{
2626	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2627}
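/*
 * Illustrative sketch (not part of the original file): the usual probe()
 * sequence built on the helpers above ("foo" is a hypothetical driver
 * name, error unwinding omitted):
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		goto out_disable;
 *	regs = pci_ioremap_bar(pdev, 0);
 */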
2628
2629/**
2630 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2631 *	@pdev: PCI device whose resources are to be reserved
2632 *	@res_name: Name to be associated with resource.
2633 *
2634 *	Mark all PCI regions associated with PCI device @pdev as
2635 *	being reserved by owner @res_name.  Do not access any
2636 *	address inside the PCI regions unless this call returns
2637 *	successfully.
2638 *
2639 *	pci_request_regions_exclusive() will mark the region so that
2640 * 	/dev/mem and the sysfs MMIO access will not be allowed.
2641 *
2642 *	Returns 0 on success, or %EBUSY on error.  A warning
2643 *	message is also printed on failure.
2644 */
2645int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2646{
2647	return pci_request_selected_regions_exclusive(pdev,
2648					((1 << 6) - 1), res_name);
2649}
2650
2651static void __pci_set_master(struct pci_dev *dev, bool enable)
2652{
2653	u16 old_cmd, cmd;
2654
2655	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2656	if (enable)
2657		cmd = old_cmd | PCI_COMMAND_MASTER;
2658	else
2659		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2660	if (cmd != old_cmd) {
2661		dev_dbg(&dev->dev, "%s bus mastering\n",
2662			enable ? "enabling" : "disabling");
2663		pci_write_config_word(dev, PCI_COMMAND, cmd);
2664	}
2665	dev->is_busmaster = enable;
2666}
2667
2668/**
2669 * pcibios_set_master - enable PCI bus-mastering for device dev
2670 * @dev: the PCI device to enable
2671 *
2672 * Enables PCI bus-mastering for the device.  This is the default
2673 * implementation.  Architecture specific implementations can override
2674 * this if necessary.
2675 */
2676void __weak pcibios_set_master(struct pci_dev *dev)
2677{
2678	u8 lat;
2679
2680	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2681	if (pci_is_pcie(dev))
2682		return;
2683
2684	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2685	if (lat < 16)
2686		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2687	else if (lat > pcibios_max_latency)
2688		lat = pcibios_max_latency;
2689	else
2690		return;
2691	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2692	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2693}
2694
2695/**
2696 * pci_set_master - enables bus-mastering for device dev
2697 * @dev: the PCI device to enable
2698 *
2699 * Enables bus-mastering on the device and calls pcibios_set_master()
2700 * to do the needed arch specific settings.
2701 */
2702void pci_set_master(struct pci_dev *dev)
2703{
2704	__pci_set_master(dev, true);
2705	pcibios_set_master(dev);
2706}
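/*
 * Illustrative sketch (not part of the original file): any driver that
 * performs DMA enables bus mastering once during probe, typically right
 * after pci_enable_device():
 *
 *	pci_set_master(pdev);
 */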
2707
2708/**
2709 * pci_clear_master - disables bus-mastering for device dev
2710 * @dev: the PCI device to disable
2711 */
2712void pci_clear_master(struct pci_dev *dev)
2713{
2714	__pci_set_master(dev, false);
2715}
2716
2717/**
2718 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2719 * @dev: the PCI device for which MWI is to be enabled
2720 *
2721 * Helper function for pci_set_mwi.
2722 * Originally copied from drivers/net/acenic.c.
2723 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2724 *
2725 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2726 */
2727int pci_set_cacheline_size(struct pci_dev *dev)
2728{
2729	u8 cacheline_size;
2730
2731	if (!pci_cache_line_size)
2732		return -EINVAL;
2733
2734	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2735	   equal to or multiple of the right value. */
2736	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2737	if (cacheline_size >= pci_cache_line_size &&
2738	    (cacheline_size % pci_cache_line_size) == 0)
2739		return 0;
2740
2741	/* Write the correct value. */
2742	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2743	/* Read it back. */
2744	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2745	if (cacheline_size == pci_cache_line_size)
2746		return 0;
2747
2748	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2749		   "supported\n", pci_cache_line_size << 2);
2750
2751	return -EINVAL;
2752}
2753EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2754
2755#ifdef PCI_DISABLE_MWI
2756int pci_set_mwi(struct pci_dev *dev)
2757{
2758	return 0;
2759}
2760
2761int pci_try_set_mwi(struct pci_dev *dev)
2762{
2763	return 0;
2764}
2765
2766void pci_clear_mwi(struct pci_dev *dev)
2767{
2768}
2769
2770#else
2771
2772/**
2773 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2774 * @dev: the PCI device for which MWI is enabled
2775 *
2776 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2777 *
2778 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2779 */
2780int
2781pci_set_mwi(struct pci_dev *dev)
2782{
2783	int rc;
2784	u16 cmd;
2785
2786	rc = pci_set_cacheline_size(dev);
2787	if (rc)
2788		return rc;
2789
2790	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2791	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2792		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2793		cmd |= PCI_COMMAND_INVALIDATE;
2794		pci_write_config_word(dev, PCI_COMMAND, cmd);
2795	}
2796	
2797	return 0;
2798}
2799
2800/**
2801 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2802 * @dev: the PCI device for which MWI is enabled
2803 *
2804 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2805 * Callers are not required to check the return value.
2806 *
2807 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2808 */
2809int pci_try_set_mwi(struct pci_dev *dev)
2810{
2811	int rc = pci_set_mwi(dev);
2812	return rc;
2813}
2814
2815/**
2816 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2817 * @dev: the PCI device to disable
2818 *
2819 * Disables PCI Memory-Write-Invalidate transaction on the device
2820 */
2821void
2822pci_clear_mwi(struct pci_dev *dev)
2823{
2824	u16 cmd;
2825
2826	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2827	if (cmd & PCI_COMMAND_INVALIDATE) {
2828		cmd &= ~PCI_COMMAND_INVALIDATE;
2829		pci_write_config_word(dev, PCI_COMMAND, cmd);
2830	}
2831}
2832#endif /* ! PCI_DISABLE_MWI */
2833
2834/**
2835 * pci_intx - enables/disables PCI INTx for device dev
2836 * @pdev: the PCI device to operate on
2837 * @enable: boolean: whether to enable or disable PCI INTx
2838 *
2839 * Enables/disables PCI INTx for device dev
2840 */
2841void
2842pci_intx(struct pci_dev *pdev, int enable)
2843{
2844	u16 pci_command, new;
2845
2846	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2847
2848	if (enable) {
2849		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2850	} else {
2851		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2852	}
2853
2854	if (new != pci_command) {
2855		struct pci_devres *dr;
2856
2857		pci_write_config_word(pdev, PCI_COMMAND, new);
2858
2859		dr = find_pci_dr(pdev);
2860		if (dr && !dr->restore_intx) {
2861			dr->restore_intx = 1;
2862			dr->orig_intx = !enable;
2863		}
2864	}
2865}
2866
2867/**
2868 * pci_intx_mask_supported - probe for INTx masking support
2869 * @dev: the PCI device to operate on
2870 *
2871 * Check if the device dev supports INTx masking via the config space
2872 * command word.
2873 */
2874bool pci_intx_mask_supported(struct pci_dev *dev)
2875{
2876	bool mask_supported = false;
2877	u16 orig, new;
2878
2879	pci_cfg_access_lock(dev);
2880
2881	pci_read_config_word(dev, PCI_COMMAND, &orig);
2882	pci_write_config_word(dev, PCI_COMMAND,
2883			      orig ^ PCI_COMMAND_INTX_DISABLE);
2884	pci_read_config_word(dev, PCI_COMMAND, &new);
2885
2886	/*
2887	 * There's no way to protect against hardware bugs or detect them
2888	 * reliably, but as long as we know what the value should be, let's
2889	 * go ahead and check it.
2890	 */
2891	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2892		dev_err(&dev->dev, "Command register changed from "
2893			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2894	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2895		mask_supported = true;
2896		pci_write_config_word(dev, PCI_COMMAND, orig);
2897	}
2898
2899	pci_cfg_access_unlock(dev);
2900	return mask_supported;
2901}
2902EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2903
2904static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2905{
2906	struct pci_bus *bus = dev->bus;
2907	bool mask_updated = true;
2908	u32 cmd_status_dword;
2909	u16 origcmd, newcmd;
2910	unsigned long flags;
2911	bool irq_pending;
2912
2913	/*
2914	 * We do a single dword read to retrieve both command and status.
2915	 * Document assumptions that make this possible.
2916	 */
2917	BUILD_BUG_ON(PCI_COMMAND % 4);
2918	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2919
2920	raw_spin_lock_irqsave(&pci_lock, flags);
2921
2922	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2923
2924	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2925
2926	/*
2927	 * Check interrupt status register to see whether our device
2928	 * triggered the interrupt (when masking) or the next IRQ is
2929	 * already pending (when unmasking).
2930	 */
2931	if (mask != irq_pending) {
2932		mask_updated = false;
2933		goto done;
2934	}
2935
2936	origcmd = cmd_status_dword;
2937	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2938	if (mask)
2939		newcmd |= PCI_COMMAND_INTX_DISABLE;
2940	if (newcmd != origcmd)
2941		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2942
2943done:
2944	raw_spin_unlock_irqrestore(&pci_lock, flags);
2945
2946	return mask_updated;
2947}
2948
2949/**
2950 * pci_check_and_mask_intx - mask INTx on pending interrupt
2951 * @dev: the PCI device to operate on
2952 *
2953 * Check if the device dev has its INTx line asserted, mask it and
2954 * return true in that case. False is returned if no interrupt was
2955 * pending.
2956 */
2957bool pci_check_and_mask_intx(struct pci_dev *dev)
2958{
2959	return pci_check_and_set_intx_mask(dev, true);
2960}
2961EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2962
2963/**
2964 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
2965 * @dev: the PCI device to operate on
2966 *
2967 * Check if the device dev has its INTx line asserted, unmask it if not
2968 * and return true. False is returned and the mask remains active if
2969 * there was still an interrupt pending.
2970 */
2971bool pci_check_and_unmask_intx(struct pci_dev *dev)
2972{
2973	return pci_check_and_set_intx_mask(dev, false);
2974}
2975EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
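/*
 * Illustrative sketch (not part of the original file): an INTx handler
 * written for device assignment can use the pair above to acknowledge
 * the device without touching device-specific registers:
 *
 *	if (!pci_check_and_mask_intx(pdev))
 *		return IRQ_NONE;
 */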
2976
2977/**
2978 * pci_msi_off - disables any MSI or MSI-X capabilities
2979 * @dev: the PCI device to operate on
2980 *
2981 * If you want to use MSI, see pci_enable_msi() and friends.
2982 * This is a lower-level primitive that allows us to disable
2983 * MSI operation at the device level.
2984 */
2985void pci_msi_off(struct pci_dev *dev)
2986{
2987	int pos;
2988	u16 control;
2989
2990	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2991	if (pos) {
2992		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2993		control &= ~PCI_MSI_FLAGS_ENABLE;
2994		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2995	}
2996	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2997	if (pos) {
2998		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2999		control &= ~PCI_MSIX_FLAGS_ENABLE;
3000		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3001	}
3002}
3003EXPORT_SYMBOL_GPL(pci_msi_off);
3004
3005int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3006{
3007	return dma_set_max_seg_size(&dev->dev, size);
3008}
3009EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3010
3011int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3012{
3013	return dma_set_seg_boundary(&dev->dev, mask);
3014}
3015EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3016
3017static int pcie_flr(struct pci_dev *dev, int probe)
3018{
3019	int i;
3020	int pos;
3021	u32 cap;
3022	u16 status, control;
3023
3024	pos = pci_pcie_cap(dev);
3025	if (!pos)
3026		return -ENOTTY;
3027
3028	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
3029	if (!(cap & PCI_EXP_DEVCAP_FLR))
3030		return -ENOTTY;
3031
3032	if (probe)
3033		return 0;
3034
3035	/* Wait for Transaction Pending bit to clear */
3036	for (i = 0; i < 4; i++) {
3037		if (i)
3038			msleep((1 << (i - 1)) * 100);
3039
3040		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3041		if (!(status & PCI_EXP_DEVSTA_TRPND))
3042			goto clear;
3043	}
3044
3045	dev_err(&dev->dev, "transaction is not cleared; "
3046			"proceeding with reset anyway\n");
3047
3048clear:
3049	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3050	control |= PCI_EXP_DEVCTL_BCR_FLR;
3051	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3052
3053	msleep(100);
3054
3055	return 0;
3056}
3057
3058static int pci_af_flr(struct pci_dev *dev, int probe)
3059{
3060	int i;
3061	int pos;
3062	u8 cap;
3063	u8 status;
3064
3065	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3066	if (!pos)
3067		return -ENOTTY;
3068
3069	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3070	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3071		return -ENOTTY;
3072
3073	if (probe)
3074		return 0;
3075
3076	/* Wait for Transaction Pending bit to clear */
3077	for (i = 0; i < 4; i++) {
3078		if (i)
3079			msleep((1 << (i - 1)) * 100);
3080
3081		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3082		if (!(status & PCI_AF_STATUS_TP))
3083			goto clear;
3084	}
3085
3086	dev_err(&dev->dev, "transaction is not cleared; "
3087			"proceeding with reset anyway\n");
3088
3089clear:
3090	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3091	msleep(100);
3092
3093	return 0;
3094}
3095
3096/**
3097 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3098 * @dev: Device to reset.
3099 * @probe: If set, only check if the device can be reset this way.
3100 *
3101 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3102 * unset, it will be reinitialized internally when going from PCI_D3hot to
3103 * PCI_D0.  If that's the case and the device is not in a low-power state
3104 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3105 *
3106 * NOTE: This causes the caller to sleep for twice the device power transition
3107 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3108 * by default (i.e. unless the @dev's d3_delay field has a different value).
3109 * Moreover, only devices in D0 can be reset by this function.
3110 */
3111static int pci_pm_reset(struct pci_dev *dev, int probe)
3112{
3113	u16 csr;
3114
3115	if (!dev->pm_cap)
3116		return -ENOTTY;
3117
3118	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3119	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3120		return -ENOTTY;
3121
3122	if (probe)
3123		return 0;
3124
3125	if (dev->current_state != PCI_D0)
3126		return -EINVAL;
3127
3128	csr &= ~PCI_PM_CTRL_STATE_MASK;
3129	csr |= PCI_D3hot;
3130	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3131	pci_dev_d3_sleep(dev);
3132
3133	csr &= ~PCI_PM_CTRL_STATE_MASK;
3134	csr |= PCI_D0;
3135	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3136	pci_dev_d3_sleep(dev);
3137
3138	return 0;
3139}
3140
3141static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3142{
3143	u16 ctrl;
3144	struct pci_dev *pdev;
3145
3146	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3147		return -ENOTTY;
3148
3149	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3150		if (pdev != dev)
3151			return -ENOTTY;
3152
3153	if (probe)
3154		return 0;
3155
3156	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3157	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3158	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3159	msleep(100);
3160
3161	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3162	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3163	msleep(100);
3164
3165	return 0;
3166}
3167
3168static int __pci_dev_reset(struct pci_dev *dev, int probe)
3169{
3170	int rc;
3171
3172	might_sleep();
3173
3174	rc = pci_dev_specific_reset(dev, probe);
3175	if (rc != -ENOTTY)
3176		goto done;
3177
3178	rc = pcie_flr(dev, probe);
3179	if (rc != -ENOTTY)
3180		goto done;
3181
3182	rc = pci_af_flr(dev, probe);
3183	if (rc != -ENOTTY)
3184		goto done;
3185
3186	rc = pci_pm_reset(dev, probe);
3187	if (rc != -ENOTTY)
3188		goto done;
3189
3190	rc = pci_parent_bus_reset(dev, probe);
3191done:
3192	return rc;
3193}
3194
3195static int pci_dev_reset(struct pci_dev *dev, int probe)
3196{
3197	int rc;
3198
3199	if (!probe) {
3200		pci_cfg_access_lock(dev);
3201		/* block PM suspend, driver probe, etc. */
3202		device_lock(&dev->dev);
3203	}
3204
3205	rc = __pci_dev_reset(dev, probe);
3206
3207	if (!probe) {
3208		device_unlock(&dev->dev);
3209		pci_cfg_access_unlock(dev);
3210	}
3211	return rc;
3212}
3213/**
3214 * __pci_reset_function - reset a PCI device function
3215 * @dev: PCI device to reset
3216 *
3217 * Some devices allow an individual function to be reset without affecting
3218 * other functions in the same device.  The PCI device must be responsive
3219 * to PCI config space in order to use this function.
3220 *
3221 * The device function is presumed to be unused when this function is called.
3222 * Resetting the device will make the contents of PCI configuration space
3223 * random, so any caller of this must be prepared to reinitialise the
3224 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3225 * etc.
3226 *
3227 * Returns 0 if the device function was successfully reset or negative if the
3228 * device doesn't support resetting a single function.
3229 */
3230int __pci_reset_function(struct pci_dev *dev)
3231{
3232	return pci_dev_reset(dev, 0);
3233}
3234EXPORT_SYMBOL_GPL(__pci_reset_function);
3235
3236/**
3237 * __pci_reset_function_locked - reset a PCI device function while holding
3238 * the @dev mutex lock.
3239 * @dev: PCI device to reset
3240 *
3241 * Some devices allow an individual function to be reset without affecting
3242 * other functions in the same device.  The PCI device must be responsive
3243 * to PCI config space in order to use this function.
3244 *
3245 * The device function is presumed to be unused and the caller is holding
3246 * the device mutex lock when this function is called.
3247 * Resetting the device will make the contents of PCI configuration space
3248 * random, so any caller of this must be prepared to reinitialise the
3249 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3250 * etc.
3251 *
3252 * Returns 0 if the device function was successfully reset or negative if the
3253 * device doesn't support resetting a single function.
3254 */
3255int __pci_reset_function_locked(struct pci_dev *dev)
3256{
3257	return __pci_dev_reset(dev, 0);
3258}
3259EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3260
3261/**
3262 * pci_probe_reset_function - check whether the device can be safely reset
3263 * @dev: PCI device to reset
3264 *
3265 * Some devices allow an individual function to be reset without affecting
3266 * other functions in the same device.  The PCI device must be responsive
3267 * to PCI config space in order to use this function.
3268 *
3269 * Returns 0 if the device function can be reset or negative if the
3270 * device doesn't support resetting a single function.
3271 */
3272int pci_probe_reset_function(struct pci_dev *dev)
3273{
3274	return pci_dev_reset(dev, 1);
3275}
3276
3277/**
3278 * pci_reset_function - quiesce and reset a PCI device function
3279 * @dev: PCI device to reset
3280 *
3281 * Some devices allow an individual function to be reset without affecting
3282 * other functions in the same device.  The PCI device must be responsive
3283 * to PCI config space in order to use this function.
3284 *
3285 * This function does not just reset the PCI portion of a device, but
3286 * clears all the state associated with the device.  This function differs
3287 * from __pci_reset_function in that it saves and restores device state
3288 * over the reset.
3289 *
3290 * Returns 0 if the device function was successfully reset or negative if the
3291 * device doesn't support resetting a single function.
3292 */
3293int pci_reset_function(struct pci_dev *dev)
3294{
3295	int rc;
3296
3297	rc = pci_dev_reset(dev, 1);
3298	if (rc)
3299		return rc;
3300
3301	pci_save_state(dev);
3302
3303	/*
3304	 * both INTx and MSI are disabled after the Interrupt Disable bit
3305	 * is set and the Bus Master bit is cleared.
3306	 */
3307	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3308
3309	rc = pci_dev_reset(dev, 0);
3310
3311	pci_restore_state(dev);
3312
3313	return rc;
3314}
3315EXPORT_SYMBOL_GPL(pci_reset_function);
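/*
 * Illustrative sketch (not part of the original file): a user-triggered
 * function reset, such as the one exposed through sysfs, ultimately comes
 * down to:
 *
 *	err = pci_reset_function(pdev);
 */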
3316
3317/**
3318 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3319 * @dev: PCI device to query
3320 *
3321 * Returns mmrbc: maximum designed memory read count in bytes
3322 *    or appropriate error value.
3323 */
3324int pcix_get_max_mmrbc(struct pci_dev *dev)
3325{
3326	int cap;
3327	u32 stat;
3328
3329	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3330	if (!cap)
3331		return -EINVAL;
3332
3333	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3334		return -EINVAL;
3335
3336	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3337}
3338EXPORT_SYMBOL(pcix_get_max_mmrbc);
3339
3340/**
3341 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3342 * @dev: PCI device to query
3343 *
3344 * Returns mmrbc: maximum memory read count in bytes
3345 *    or appropriate error value.
3346 */
3347int pcix_get_mmrbc(struct pci_dev *dev)
3348{
3349	int cap;
3350	u16 cmd;
3351
3352	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3353	if (!cap)
3354		return -EINVAL;
3355
3356	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3357		return -EINVAL;
3358
3359	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3360}
3361EXPORT_SYMBOL(pcix_get_mmrbc);
3362
3363/**
3364 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3365 * @dev: PCI device to query
3366 * @mmrbc: maximum memory read count in bytes
3367 *    valid values are 512, 1024, 2048, 4096
3368 *
3369 * If possible, sets the maximum memory read byte count; some bridges have errata
3370 * that prevent this.
3371 */
3372int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3373{
3374	int cap;
3375	u32 stat, v, o;
3376	u16 cmd;
3377
3378	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3379		return -EINVAL;
3380
3381	v = ffs(mmrbc) - 10;
3382
3383	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3384	if (!cap)
3385		return -EINVAL;
3386
3387	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3388		return -EINVAL;
3389
3390	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3391		return -E2BIG;
3392
3393	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3394		return -EINVAL;
3395
3396	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3397	if (o != v) {
3398		if (v > o && dev->bus &&
3399		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3400			return -EIO;
3401
3402		cmd &= ~PCI_X_CMD_MAX_READ;
3403		cmd |= v << 2;
3404		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3405			return -EIO;
3406	}
3407	return 0;
3408}
3409EXPORT_SYMBOL(pcix_set_mmrbc);
3410
3411/**
3412 * pcie_get_readrq - get PCI Express read request size
3413 * @dev: PCI device to query
3414 *
3415 * Returns maximum memory read request in bytes
3416 *    or appropriate error value.
3417 */
3418int pcie_get_readrq(struct pci_dev *dev)
3419{
3420	int ret, cap;
3421	u16 ctl;
3422
3423	cap = pci_pcie_cap(dev);
3424	if (!cap)
3425		return -EINVAL;
3426
3427	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3428	if (!ret)
3429		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3430
3431	return ret;
3432}
3433EXPORT_SYMBOL(pcie_get_readrq);
3434
3435/**
3436 * pcie_set_readrq - set PCI Express maximum memory read request
3437 * @dev: PCI device to query
3438 * @rq: maximum memory read count in bytes
3439 *    valid values are 128, 256, 512, 1024, 2048, 4096
3440 *
3441 * If possible, sets the maximum memory read request in bytes
3442 */
3443int pcie_set_readrq(struct pci_dev *dev, int rq)
3444{
3445	int cap, err = -EINVAL;
3446	u16 ctl, v;
3447
3448	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3449		goto out;
3450
3451	cap = pci_pcie_cap(dev);
3452	if (!cap)
3453		goto out;
3454
3455	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3456	if (err)
3457		goto out;
3458	/*
3459	 * If using the "performance" PCIe config, we clamp the
3460	 * read rq size to the max packet size to prevent the
3461	 * host bridge generating requests larger than we can
3462	 * cope with
3463	 */
3464	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3465		int mps = pcie_get_mps(dev);
3466
3467		if (mps < 0)
3468			return mps;
3469		if (mps < rq)
3470			rq = mps;
3471	}
3472
3473	v = (ffs(rq) - 8) << 12;
3474
3475	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3476		ctl &= ~PCI_EXP_DEVCTL_READRQ;
3477		ctl |= v;
3478		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3479	}
3480
3481out:
3482	return err;
3483}
3484EXPORT_SYMBOL(pcie_set_readrq);
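/*
 * Illustrative sketch (not part of the original file): a driver that
 * knows its device benefits from 4096-byte read requests can ask for
 * them and tolerate the value being clamped or rejected:
 *
 *	if (pcie_set_readrq(pdev, 4096))
 *		dev_dbg(&pdev->dev, "unable to set readrq to 4096\n");
 */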
3485
3486/**
3487 * pcie_get_mps - get PCI Express maximum payload size
3488 * @dev: PCI device to query
3489 *
3490 * Returns maximum payload size in bytes
3491 *    or appropriate error value.
3492 */
3493int pcie_get_mps(struct pci_dev *dev)
3494{
3495	int ret, cap;
3496	u16 ctl;
3497
3498	cap = pci_pcie_cap(dev);
3499	if (!cap)
3500		return -EINVAL;
3501
3502	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3503	if (!ret)
3504		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3505
3506	return ret;
3507}
3508
3509/**
3510 * pcie_set_mps - set PCI Express maximum payload size
3511 * @dev: PCI device to query
3512 * @mps: maximum payload size in bytes
3513 *    valid values are 128, 256, 512, 1024, 2048, 4096
3514 *
3515 * If possible, sets the maximum payload size
3516 */
3517int pcie_set_mps(struct pci_dev *dev, int mps)
3518{
3519	int cap, err = -EINVAL;
3520	u16 ctl, v;
3521
3522	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3523		goto out;
3524
3525	v = ffs(mps) - 8;
3526	if (v > dev->pcie_mpss)
3527		goto out;
3528	v <<= 5;
3529
3530	cap = pci_pcie_cap(dev);
3531	if (!cap)
3532		goto out;
3533
3534	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3535	if (err)
3536		goto out;
3537
3538	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3539		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3540		ctl |= v;
3541		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3542	}
3543out:
3544	return err;
3545}
3546
3547/**
3548 * pci_select_bars - Make BAR mask from the type of resource
3549 * @dev: the PCI device for which BAR mask is made
3550 * @flags: resource type mask to be selected
3551 *
3552 * This helper routine makes a BAR mask from the type of resource.
3553 */
3554int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3555{
3556	int i, bars = 0;
3557	for (i = 0; i < PCI_NUM_RESOURCES; i++)
3558		if (pci_resource_flags(dev, i) & flags)
3559			bars |= (1 << i);
3560	return bars;
3561}
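/*
 * Editor's example (illustrative only): pci_select_bars() is usually paired
 * with pci_request_selected_regions() so a driver claims just the BARs of the
 * type it maps; "my_driver" and "pdev" are placeholders.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *
 *	err = pci_request_selected_regions(pdev, bars, "my_driver");
 */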
3562
3563/**
3564 * pci_resource_bar - get position of the BAR associated with a resource
3565 * @dev: the PCI device
3566 * @resno: the resource number
3567 * @type: the BAR type to be filled in
3568 *
3569 * Returns BAR position in config space, or 0 if the BAR is invalid.
3570 */
3571int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3572{
3573	int reg;
3574
3575	if (resno < PCI_ROM_RESOURCE) {
3576		*type = pci_bar_unknown;
3577		return PCI_BASE_ADDRESS_0 + 4 * resno;
3578	} else if (resno == PCI_ROM_RESOURCE) {
3579		*type = pci_bar_mem32;
3580		return dev->rom_base_reg;
3581	} else if (resno < PCI_BRIDGE_RESOURCES) {
3582		/* device specific resource */
3583		reg = pci_iov_resource_bar(dev, resno, type);
3584		if (reg)
3585			return reg;
3586	}
3587
3588	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3589	return 0;
3590}
3591
3592/* Some architectures require additional programming to enable VGA */
3593static arch_set_vga_state_t arch_set_vga_state;
3594
3595void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3596{
3597	arch_set_vga_state = func;	/* NULL disables */
3598}
3599
3600static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3601		      unsigned int command_bits, u32 flags)
3602{
3603	if (arch_set_vga_state)
3604		return arch_set_vga_state(dev, decode, command_bits,
3605						flags);
3606	return 0;
3607}
3608
3609/**
3610 * pci_set_vga_state - set VGA decode state on device and parents if requested
3611 * @dev: the PCI device
3612 * @decode: true = enable decoding, false = disable decoding
3613 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3614 * @flags: traverse ancestors and change bridges
3615 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3616 */
3617int pci_set_vga_state(struct pci_dev *dev, bool decode,
3618		      unsigned int command_bits, u32 flags)
3619{
3620	struct pci_bus *bus;
3621	struct pci_dev *bridge;
3622	u16 cmd;
3623	int rc;
3624
3625	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3626
3627	/* ARCH specific VGA enables */
3628	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3629	if (rc)
3630		return rc;
3631
3632	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3633		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3634		if (decode == true)
3635			cmd |= command_bits;
3636		else
3637			cmd &= ~command_bits;
3638		pci_write_config_word(dev, PCI_COMMAND, cmd);
3639	}
3640
3641	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3642		return 0;
3643
3644	bus = dev->bus;
3645	while (bus) {
3646		bridge = bus->self;
3647		if (bridge) {
3648			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3649					     &cmd);
3650			if (decode == true)
3651				cmd |= PCI_BRIDGE_CTL_VGA;
3652			else
3653				cmd &= ~PCI_BRIDGE_CTL_VGA;
3654			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3655					      cmd);
3656		}
3657		bus = bus->parent;
3658	}
3659	return 0;
3660}
3661
3662#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3663static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3664static DEFINE_SPINLOCK(resource_alignment_lock);
3665
3666/**
3667 * pci_specified_resource_alignment - get resource alignment specified by user.
3668 * @dev: the PCI device to query
 
3669 *
3670 * RETURNS: Resource alignment if it is specified.
3671 *          Zero if it is not specified.
3672 */
3673resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
 
3674{
3675	int seg, bus, slot, func, align_order, count;
3676	resource_size_t align = 0;
3677	char *p;
 
3678
3679	spin_lock(&resource_alignment_lock);
3680	p = resource_alignment_param;
3681	while (*p) {
3682		count = 0;
3683		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3684							p[count] == '@') {
3685			p += count + 1;
3686		} else {
3687			align_order = -1;
3688		}
3689		if (sscanf(p, "%x:%x:%x.%x%n",
3690			&seg, &bus, &slot, &func, &count) != 4) {
3691			seg = 0;
3692			if (sscanf(p, "%x:%x.%x%n",
3693					&bus, &slot, &func, &count) != 3) {
3694				/* Invalid format */
3695				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3696					p);
3697				break;
3698			}
 
 
3699		}
3700		p += count;
3701		if (seg == pci_domain_nr(dev->bus) &&
3702			bus == dev->bus->number &&
3703			slot == PCI_SLOT(dev->devfn) &&
3704			func == PCI_FUNC(dev->devfn)) {
3705			if (align_order == -1) {
3706				align = PAGE_SIZE;
3707			} else {
3708				align = 1 << align_order;
3709			}
3710			/* Found */
3711			break;
3712		}
 
3713		if (*p != ';' && *p != ',') {
3714			/* End of param or invalid format */
3715			break;
3716		}
3717		p++;
3718	}
 
3719	spin_unlock(&resource_alignment_lock);
3720	return align;
3721}
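/*
 * Editor's note: the parser above accepts entries of the form
 * [<order>@][<domain>:]<bus>:<slot>.<func>, separated by ';' or ','.  For
 * example, the (hypothetical) boot parameter
 *
 *	pci=resource_alignment=20@0000:01:00.0
 *
 * requests 2^20 (1 MiB) alignment for device 0000:01:00.0; omitting the
 * "20@" prefix falls back to PAGE_SIZE alignment.
 */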
3722
3723/**
3724 * pci_is_reassigndev - check if specified PCI is target device to reassign
3725 * @dev: the PCI device to check
3726 *
3727 * RETURNS: non-zero if the PCI device is a target device to reassign,
3728 *          or zero if it is not.
3729 */
3730int pci_is_reassigndev(struct pci_dev *dev)
3731{
3732	return (pci_specified_resource_alignment(dev) != 0);
3733}
3734
3735/*
3736 * This function disables memory decoding and releases memory resources
3737 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
3738 * It also rounds up size to specified alignment.
3739 * Later on, the kernel will assign page-aligned memory resource back
3740 * to the device.
3741 */
3742void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3743{
3744	int i;
3745	struct resource *r;
3746	resource_size_t align, size;
3747	u16 command;
 
3748
3749	if (!pci_is_reassigndev(dev))
3750		return;
3751
3752	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3753	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3754		dev_warn(&dev->dev,
3755			"Can't reassign resources to host bridge.\n");
3756		return;
3757	}
3758
3759	dev_info(&dev->dev,
3760		"Disabling memory decoding and releasing memory resources.\n");
3761	pci_read_config_word(dev, PCI_COMMAND, &command);
3762	command &= ~PCI_COMMAND_MEMORY;
3763	pci_write_config_word(dev, PCI_COMMAND, command);
3764
3765	align = pci_specified_resource_alignment(dev);
3766	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3767		r = &dev->resource[i];
3768		if (!(r->flags & IORESOURCE_MEM))
3769			continue;
3770		size = resource_size(r);
3771		if (size < align) {
3772			size = align;
3773			dev_info(&dev->dev,
3774				"Rounding up size of resource #%d to %#llx.\n",
3775				i, (unsigned long long)size);
3776		}
3777		r->end = size - 1;
3778		r->start = 0;
3779	}
3780	/* Need to disable bridge's resource window,
3781	 * to enable the kernel to reassign new resource
3782	 * window later on.
3783	 */
3784	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3785	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3786		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3787			r = &dev->resource[i];
3788			if (!(r->flags & IORESOURCE_MEM))
3789				continue;
 
3790			r->end = resource_size(r) - 1;
3791			r->start = 0;
3792		}
3793		pci_disable_bridge_window(dev);
3794	}
3795}
3796
3797ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3798{
3799	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3800		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3801	spin_lock(&resource_alignment_lock);
3802	strncpy(resource_alignment_param, buf, count);
3803	resource_alignment_param[count] = '\0';
3804	spin_unlock(&resource_alignment_lock);
 
3805	return count;
3806}
3807
3808ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
 
3809{
3810	size_t count;
3811	spin_lock(&resource_alignment_lock);
3812	count = snprintf(buf, size, "%s", resource_alignment_param);
3813	spin_unlock(&resource_alignment_lock);
3814	return count;
3815}
3816
3817static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3818{
3819	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3820}
3821
3822static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3823					const char *buf, size_t count)
3824{
3825	return pci_set_resource_alignment_param(buf, count);
3826}
3827
3828BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3829					pci_resource_alignment_store);
3830
3831static int __init pci_resource_alignment_sysfs_init(void)
3832{
3833	return bus_create_file(&pci_bus_type,
3834					&bus_attr_resource_alignment);
3835}
3836
3837late_initcall(pci_resource_alignment_sysfs_init);
3838
3839static void __devinit pci_no_domains(void)
3840{
3841#ifdef CONFIG_PCI_DOMAINS
3842	pci_domains_supported = 0;
3843#endif
3844}
3845
3846/**
3847 * pci_ext_cfg_avail - can we access extended PCI config space?
3848 * @dev: The PCI device of the root bridge.
3849 *
3850 * Returns 1 if we can access PCI extended config space (offsets
3851 * greater than 0xff). This is the default implementation. Architecture
3852 * implementations can override this.
3853 */
3854int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3855{
3856	return 1;
3857}
3858
3859void __weak pci_fixup_cardbus(struct pci_bus *bus)
3860{
3861}
3862EXPORT_SYMBOL(pci_fixup_cardbus);
3863
3864static int __init pci_setup(char *str)
3865{
3866	while (str) {
3867		char *k = strchr(str, ',');
3868		if (k)
3869			*k++ = 0;
3870		if (*str && (str = pcibios_setup(str)) && *str) {
3871			if (!strcmp(str, "nomsi")) {
3872				pci_no_msi();
 
 
 
3873			} else if (!strcmp(str, "noaer")) {
3874				pci_no_aer();
 
 
3875			} else if (!strncmp(str, "realloc=", 8)) {
3876				pci_realloc_get_opt(str + 8);
3877			} else if (!strncmp(str, "realloc", 7)) {
3878				pci_realloc_get_opt("on");
3879			} else if (!strcmp(str, "nodomains")) {
3880				pci_no_domains();
3881			} else if (!strncmp(str, "noari", 5)) {
3882				pcie_ari_disabled = true;
3883			} else if (!strncmp(str, "cbiosize=", 9)) {
3884				pci_cardbus_io_size = memparse(str + 9, &str);
3885			} else if (!strncmp(str, "cbmemsize=", 10)) {
3886				pci_cardbus_mem_size = memparse(str + 10, &str);
3887			} else if (!strncmp(str, "resource_alignment=", 19)) {
3888				pci_set_resource_alignment_param(str + 19,
3889							strlen(str + 19));
3890			} else if (!strncmp(str, "ecrc=", 5)) {
3891				pcie_ecrc_get_policy(str + 5);
3892			} else if (!strncmp(str, "hpiosize=", 9)) {
3893				pci_hotplug_io_size = memparse(str + 9, &str);
3894			} else if (!strncmp(str, "hpmemsize=", 10)) {
3895				pci_hotplug_mem_size = memparse(str + 10, &str);
3896			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3897				pcie_bus_config = PCIE_BUS_TUNE_OFF;
3898			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
3899				pcie_bus_config = PCIE_BUS_SAFE;
3900			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
3901				pcie_bus_config = PCIE_BUS_PERFORMANCE;
3902			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3903				pcie_bus_config = PCIE_BUS_PEER2PEER;
3904			} else if (!strncmp(str, "pcie_scan_all", 13)) {
3905				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
 
 
3906			} else {
3907				printk(KERN_ERR "PCI: Unknown option `%s'\n",
3908						str);
3909			}
3910		}
3911		str = k;
3912	}
3913	return 0;
3914}
3915early_param("pci", pci_setup);
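/*
 * Editor's note: pci_setup() consumes comma-separated options from the "pci="
 * kernel command line.  An illustrative combination of the options parsed
 * above (values are examples only):
 *
 *	pci=nomsi,realloc=on,hpmemsize=8M,pcie_bus_safe
 */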
3916
3917EXPORT_SYMBOL(pci_reenable_device);
3918EXPORT_SYMBOL(pci_enable_device_io);
3919EXPORT_SYMBOL(pci_enable_device_mem);
3920EXPORT_SYMBOL(pci_enable_device);
3921EXPORT_SYMBOL(pcim_enable_device);
3922EXPORT_SYMBOL(pcim_pin_device);
3923EXPORT_SYMBOL(pci_disable_device);
3924EXPORT_SYMBOL(pci_find_capability);
3925EXPORT_SYMBOL(pci_bus_find_capability);
3926EXPORT_SYMBOL(pci_release_regions);
3927EXPORT_SYMBOL(pci_request_regions);
3928EXPORT_SYMBOL(pci_request_regions_exclusive);
3929EXPORT_SYMBOL(pci_release_region);
3930EXPORT_SYMBOL(pci_request_region);
3931EXPORT_SYMBOL(pci_request_region_exclusive);
3932EXPORT_SYMBOL(pci_release_selected_regions);
3933EXPORT_SYMBOL(pci_request_selected_regions);
3934EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3935EXPORT_SYMBOL(pci_set_master);
3936EXPORT_SYMBOL(pci_clear_master);
3937EXPORT_SYMBOL(pci_set_mwi);
3938EXPORT_SYMBOL(pci_try_set_mwi);
3939EXPORT_SYMBOL(pci_clear_mwi);
3940EXPORT_SYMBOL_GPL(pci_intx);
3941EXPORT_SYMBOL(pci_assign_resource);
3942EXPORT_SYMBOL(pci_find_parent_resource);
3943EXPORT_SYMBOL(pci_select_bars);
3944
3945EXPORT_SYMBOL(pci_set_power_state);
3946EXPORT_SYMBOL(pci_save_state);
3947EXPORT_SYMBOL(pci_restore_state);
3948EXPORT_SYMBOL(pci_pme_capable);
3949EXPORT_SYMBOL(pci_pme_active);
3950EXPORT_SYMBOL(pci_wake_from_d3);
3951EXPORT_SYMBOL(pci_target_state);
3952EXPORT_SYMBOL(pci_prepare_to_sleep);
3953EXPORT_SYMBOL(pci_back_from_sleep);
3954EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCI Bus Services, see include/linux/pci.h for further explanation.
   4 *
   5 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   6 * David Mosberger-Tang
   7 *
   8 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   9 */
  10
  11#include <linux/acpi.h>
  12#include <linux/kernel.h>
  13#include <linux/delay.h>
  14#include <linux/dmi.h>
  15#include <linux/init.h>
  16#include <linux/msi.h>
  17#include <linux/of.h>
  18#include <linux/pci.h>
  19#include <linux/pm.h>
  20#include <linux/slab.h>
  21#include <linux/module.h>
  22#include <linux/spinlock.h>
  23#include <linux/string.h>
  24#include <linux/log2.h>
  25#include <linux/logic_pio.h>
  26#include <linux/pm_wakeup.h>
  27#include <linux/interrupt.h>
  28#include <linux/device.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/pci_hotplug.h>
  31#include <linux/vmalloc.h>
  32#include <asm/dma.h>
  33#include <linux/aer.h>
  34#include <linux/bitfield.h>
  35#include "pci.h"
  36
  37DEFINE_MUTEX(pci_slot_mutex);
  38
  39const char *pci_power_names[] = {
  40	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  41};
  42EXPORT_SYMBOL_GPL(pci_power_names);
  43
  44#ifdef CONFIG_X86_32
  45int isa_dma_bridge_buggy;
  46EXPORT_SYMBOL(isa_dma_bridge_buggy);
  47#endif
  48
  49int pci_pci_problems;
  50EXPORT_SYMBOL(pci_pci_problems);
  51
  52unsigned int pci_pm_d3hot_delay;
  53
  54static void pci_pme_list_scan(struct work_struct *work);
  55
  56static LIST_HEAD(pci_pme_list);
  57static DEFINE_MUTEX(pci_pme_list_mutex);
  58static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  59
  60struct pci_pme_device {
  61	struct list_head list;
  62	struct pci_dev *dev;
  63};
  64
  65#define PME_TIMEOUT 1000 /* How long between PME checks */
  66
  67static void pci_dev_d3_sleep(struct pci_dev *dev)
  68{
  69	unsigned int delay_ms = max(dev->d3hot_delay, pci_pm_d3hot_delay);
  70	unsigned int upper;
  71
  72	if (delay_ms) {
  73		/* Use a 20% upper bound, 1ms minimum */
  74		upper = max(DIV_ROUND_CLOSEST(delay_ms, 5), 1U);
  75		usleep_range(delay_ms * USEC_PER_MSEC,
  76			     (delay_ms + upper) * USEC_PER_MSEC);
  77	}
  78}
  79
  80bool pci_reset_supported(struct pci_dev *dev)
  81{
  82	return dev->reset_methods[0] != 0;
  83}
  84
  85#ifdef CONFIG_PCI_DOMAINS
  86int pci_domains_supported = 1;
  87#endif
  88
  89#define DEFAULT_CARDBUS_IO_SIZE		(256)
  90#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  91/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  92unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  93unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  94
  95#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  96#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
  97#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
  98/* hpiosize=nn can override this */
  99unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
 100/*
 101 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
 102 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
 103 * pci=hpmemsize=nnM overrides both
 104 */
 105unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
 106unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
 107
 108#define DEFAULT_HOTPLUG_BUS_SIZE	1
 109unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
 110
 111
 112/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
 113#ifdef CONFIG_PCIE_BUS_TUNE_OFF
 114enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
 115#elif defined CONFIG_PCIE_BUS_SAFE
 116enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
 117#elif defined CONFIG_PCIE_BUS_PERFORMANCE
 118enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
 119#elif defined CONFIG_PCIE_BUS_PEER2PEER
 120enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
 121#else
 122enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
 123#endif
 124
 125/*
 126 * The default CLS is used if arch didn't set CLS explicitly and not
 127 * all pci devices agree on the same value.  Arch can override either
 128 * the dfl or actual value as it sees fit.  Don't forget this is
 129 * measured in 32-bit words, not bytes.
 130 */
 131u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
 132u8 pci_cache_line_size;
 133
 134/*
 135 * If we set up a device for bus mastering, we need to check the latency
 136 * timer as certain BIOSes forget to set it properly.
 137 */
 138unsigned int pcibios_max_latency = 255;
 139
 140/* If set, the PCIe ARI capability will not be used. */
 141static bool pcie_ari_disabled;
 142
 143/* If set, the PCIe ATS capability will not be used. */
 144static bool pcie_ats_disabled;
 145
 146/* If set, the PCI config space of each device is printed during boot. */
 147bool pci_early_dump;
 148
 149bool pci_ats_disabled(void)
 150{
 151	return pcie_ats_disabled;
 152}
 153EXPORT_SYMBOL_GPL(pci_ats_disabled);
 154
 155/* Disable bridge_d3 for all PCIe ports */
 156static bool pci_bridge_d3_disable;
 157/* Force bridge_d3 for all PCIe ports */
 158static bool pci_bridge_d3_force;
 159
 160static int __init pcie_port_pm_setup(char *str)
 161{
 162	if (!strcmp(str, "off"))
 163		pci_bridge_d3_disable = true;
 164	else if (!strcmp(str, "force"))
 165		pci_bridge_d3_force = true;
 166	return 1;
 167}
 168__setup("pcie_port_pm=", pcie_port_pm_setup);
 169
 170/* Time to wait after a reset for device to become responsive */
 171#define PCIE_RESET_READY_POLL_MS 60000
 172
 173/**
 174 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 175 * @bus: pointer to PCI bus structure to search
 176 *
 177 * Given a PCI bus, returns the highest PCI bus number present in the set
 178 * including the given PCI bus and its list of child PCI buses.
 179 */
 180unsigned char pci_bus_max_busnr(struct pci_bus *bus)
 181{
 182	struct pci_bus *tmp;
 183	unsigned char max, n;
 184
 185	max = bus->busn_res.end;
 186	list_for_each_entry(tmp, &bus->children, node) {
 187		n = pci_bus_max_busnr(tmp);
 188		if (n > max)
 189			max = n;
 190	}
 191	return max;
 192}
 193EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 194
 195/**
 196 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 197 * @pdev: the PCI device
 198 *
 199 * Returns error bits set in PCI_STATUS and clears them.
 200 */
 201int pci_status_get_and_clear_errors(struct pci_dev *pdev)
 202{
 203	u16 status;
 204	int ret;
 205
 206	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
 207	if (ret != PCIBIOS_SUCCESSFUL)
 208		return -EIO;
 209
 210	status &= PCI_STATUS_ERROR_BITS;
 211	if (status)
 212		pci_write_config_word(pdev, PCI_STATUS, status);
 213
 214	return status;
 215}
 216EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
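/*
 * Editor's example (sketch with a hypothetical "pdev"): an error handler can
 * use this helper to report and acknowledge error bits in one step.
 *
 *	int status = pci_status_get_and_clear_errors(pdev);
 *
 *	if (status > 0 && (status & PCI_STATUS_REC_MASTER_ABORT))
 *		pci_warn(pdev, "received master abort\n");
 */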
 217
 218#ifdef CONFIG_HAS_IOMEM
 219static void __iomem *__pci_ioremap_resource(struct pci_dev *pdev, int bar,
 220					    bool write_combine)
 221{
 222	struct resource *res = &pdev->resource[bar];
 223	resource_size_t start = res->start;
 224	resource_size_t size = resource_size(res);
 225
 226	/*
 227	 * Make sure the BAR is actually a memory resource, not an IO resource
 228	 */
 229	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
 230		pci_err(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
 231		return NULL;
 232	}
 233
 234	if (write_combine)
 235		return ioremap_wc(start, size);
 236
 237	return ioremap(start, size);
 238}
 239
 240void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 241{
 242	return __pci_ioremap_resource(pdev, bar, false);
 243}
 244EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 245
 246void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
 247{
 248	return __pci_ioremap_resource(pdev, bar, true);
 249}
 250EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
 251#endif
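/*
 * Editor's example (illustrative only): pci_ioremap_bar() replaces the usual
 * pci_resource_start()/pci_resource_len()/ioremap() sequence; a hypothetical
 * probe path could map BAR 0 like this:
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 */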
 252
 
 253/**
 254 * pci_dev_str_match_path - test if a path string matches a device
 255 * @dev: the PCI device to test
 256 * @path: string to match the device against
 257 * @endptr: pointer to the string after the match
 258 *
 259 * Test if a string (typically from a kernel parameter) formatted as a
 260 * path of device/function addresses matches a PCI device. The string must
 261 * be of the form:
 262 *
 263 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 264 *
 265 * A path for a device can be obtained using 'lspci -t'.  Using a path
 266 * is more robust against bus renumbering than using only a single bus,
 267 * device and function address.
 268 *
 269 * Returns 1 if the string matches the device, 0 if it does not and
 270 * a negative error code if it fails to parse the string.
 271 */
 272static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
 273				  const char **endptr)
 274{
 275	int ret;
 276	unsigned int seg, bus, slot, func;
 277	char *wpath, *p;
 278	char end;
 279
 280	*endptr = strchrnul(path, ';');
 281
 282	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
 283	if (!wpath)
 284		return -ENOMEM;
 285
 286	while (1) {
 287		p = strrchr(wpath, '/');
 288		if (!p)
 289			break;
 290		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
 291		if (ret != 2) {
 292			ret = -EINVAL;
 293			goto free_and_exit;
 294		}
 295
 296		if (dev->devfn != PCI_DEVFN(slot, func)) {
 297			ret = 0;
 298			goto free_and_exit;
 299		}
 300
 301		/*
 302		 * Note: we don't need to get a reference to the upstream
 303		 * bridge because we hold a reference to the top level
 304		 * device which should hold a reference to the bridge,
 305		 * and so on.
 306		 */
 307		dev = pci_upstream_bridge(dev);
 308		if (!dev) {
 309			ret = 0;
 310			goto free_and_exit;
 311		}
 312
 313		*p = 0;
 314	}
 315
 316	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
 317		     &func, &end);
 318	if (ret != 4) {
 319		seg = 0;
 320		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
 321		if (ret != 3) {
 322			ret = -EINVAL;
 323			goto free_and_exit;
 324		}
 325	}
 326
 327	ret = (seg == pci_domain_nr(dev->bus) &&
 328	       bus == dev->bus->number &&
 329	       dev->devfn == PCI_DEVFN(slot, func));
 330
 331free_and_exit:
 332	kfree(wpath);
 333	return ret;
 334}
 335
 336/**
 337 * pci_dev_str_match - test if a string matches a device
 338 * @dev: the PCI device to test
 339 * @p: string to match the device against
 340 * @endptr: pointer to the string after the match
 341 *
 342 * Test if a string (typically from a kernel parameter) matches a specified
 343 * PCI device. The string may be of one of the following formats:
 344 *
 345 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 346 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 347 *
 348 * The first format specifies a PCI bus/device/function address which
 349 * may change if new hardware is inserted, if motherboard firmware changes,
 350 * or due to changes in kernel parameters. If the domain is
 351 * left unspecified, it is taken to be 0.  In order to be robust against
 352 * bus renumbering issues, a path of PCI device/function numbers may be used
 353 * to address the specific device.  The path for a device can be determined
 354 * through the use of 'lspci -t'.
 355 *
 356 * The second format matches devices using IDs in the configuration
 357 * space which may match multiple devices in the system. A value of 0
 358 * for any field will match all devices. (Note: this differs from
 359 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 360 * legacy reasons and convenience so users don't have to specify
 361 * FFFFFFFFs on the command line.)
 362 *
 363 * Returns 1 if the string matches the device, 0 if it does not and
 364 * a negative error code if the string cannot be parsed.
 365 */
 366static int pci_dev_str_match(struct pci_dev *dev, const char *p,
 367			     const char **endptr)
 368{
 369	int ret;
 370	int count;
 371	unsigned short vendor, device, subsystem_vendor, subsystem_device;
 372
 373	if (strncmp(p, "pci:", 4) == 0) {
 374		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
 375		p += 4;
 376		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
 377			     &subsystem_vendor, &subsystem_device, &count);
 378		if (ret != 4) {
 379			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
 380			if (ret != 2)
 381				return -EINVAL;
 382
 383			subsystem_vendor = 0;
 384			subsystem_device = 0;
 385		}
 386
 387		p += count;
 388
 389		if ((!vendor || vendor == dev->vendor) &&
 390		    (!device || device == dev->device) &&
 391		    (!subsystem_vendor ||
 392			    subsystem_vendor == dev->subsystem_vendor) &&
 393		    (!subsystem_device ||
 394			    subsystem_device == dev->subsystem_device))
 395			goto found;
 396	} else {
 397		/*
 398		 * PCI Bus, Device, Function IDs are specified
 399		 * (optionally, may include a path of devfns following it)
 400		 */
 401		ret = pci_dev_str_match_path(dev, p, &p);
 402		if (ret < 0)
 403			return ret;
 404		else if (ret)
 405			goto found;
 406	}
 407
 408	*endptr = p;
 409	return 0;
 410
 411found:
 412	*endptr = p;
 413	return 1;
 414}
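/*
 * Editor's note: strings in either format above are what device-list
 * parameters such as "pci=disable_acs_redir=" consume, e.g. (illustrative
 * values only):
 *
 *	pci=disable_acs_redir=0000:00:1c.4;pci:8086:9d18
 */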
 415
 416static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 417				  u8 pos, int cap, int *ttl)
 418{
 419	u8 id;
 420	u16 ent;
 421
 422	pci_bus_read_config_byte(bus, devfn, pos, &pos);
 423
 424	while ((*ttl)--) {
 
 425		if (pos < 0x40)
 426			break;
 427		pos &= ~3;
 428		pci_bus_read_config_word(bus, devfn, pos, &ent);
 429
 430		id = ent & 0xff;
 431		if (id == 0xff)
 432			break;
 433		if (id == cap)
 434			return pos;
 435		pos = (ent >> 8);
 436	}
 437	return 0;
 438}
 439
 440static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 441			      u8 pos, int cap)
 442{
 443	int ttl = PCI_FIND_CAP_TTL;
 444
 445	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 446}
 447
 448u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 449{
 450	return __pci_find_next_cap(dev->bus, dev->devfn,
 451				   pos + PCI_CAP_LIST_NEXT, cap);
 452}
 453EXPORT_SYMBOL_GPL(pci_find_next_capability);
 454
 455static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
 456				    unsigned int devfn, u8 hdr_type)
 457{
 458	u16 status;
 459
 460	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 461	if (!(status & PCI_STATUS_CAP_LIST))
 462		return 0;
 463
 464	switch (hdr_type) {
 465	case PCI_HEADER_TYPE_NORMAL:
 466	case PCI_HEADER_TYPE_BRIDGE:
 467		return PCI_CAPABILITY_LIST;
 468	case PCI_HEADER_TYPE_CARDBUS:
 469		return PCI_CB_CAPABILITY_LIST;
 
 
 470	}
 471
 472	return 0;
 473}
 474
 475/**
 476 * pci_find_capability - query for devices' capabilities
 477 * @dev: PCI device to query
 478 * @cap: capability code
 479 *
 480 * Tell if a device supports a given PCI capability.
 481 * Returns the address of the requested capability structure within the
 482 * device's PCI configuration space or 0 in case the device does not
 483 * support it.  Possible values for @cap include:
 484 *
 485 *  %PCI_CAP_ID_PM           Power Management
 486 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 487 *  %PCI_CAP_ID_VPD          Vital Product Data
 488 *  %PCI_CAP_ID_SLOTID       Slot Identification
 489 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 490 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 491 *  %PCI_CAP_ID_PCIX         PCI-X
 492 *  %PCI_CAP_ID_EXP          PCI Express
 493 */
 494u8 pci_find_capability(struct pci_dev *dev, int cap)
 495{
 496	u8 pos;
 497
 498	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 499	if (pos)
 500		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 501
 502	return pos;
 503}
 504EXPORT_SYMBOL(pci_find_capability);
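/*
 * Editor's example (sketch, hypothetical variables): locate the Power
 * Management capability and read its PMC register.
 *
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */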
 505
 506/**
 507 * pci_bus_find_capability - query for devices' capabilities
 508 * @bus: the PCI bus to query
 509 * @devfn: PCI device to query
 510 * @cap: capability code
 511 *
 512 * Like pci_find_capability() but works for PCI devices that do not have a
 513 * pci_dev structure set up yet.
 514 *
 515 * Returns the address of the requested capability structure within the
 516 * device's PCI configuration space or 0 in case the device does not
 517 * support it.
 518 */
 519u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 520{
 521	u8 hdr_type, pos;
 
 522
 523	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 524
 525	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 526	if (pos)
 527		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 528
 529	return pos;
 530}
 531EXPORT_SYMBOL(pci_bus_find_capability);
 532
 533/**
 534 * pci_find_next_ext_capability - Find an extended capability
 535 * @dev: PCI device to query
 536 * @start: address at which to start looking (0 to start at beginning of list)
 537 * @cap: capability code
 538 *
 539 * Returns the address of the next matching extended capability structure
 540 * within the device's PCI configuration space or 0 if the device does
 541 * not support it.  Some capabilities can occur several times, e.g., the
 542 * vendor-specific capability, and this provides a way to find them all.
 543 */
 544u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
 545{
 546	u32 header;
 547	int ttl;
 548	u16 pos = PCI_CFG_SPACE_SIZE;
 549
 550	/* minimum 8 bytes per capability */
 551	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 552
 553	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 554		return 0;
 555
 556	if (start)
 557		pos = start;
 558
 559	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 560		return 0;
 561
 562	/*
 563	 * If we have no capabilities, this is indicated by cap ID,
 564	 * cap version and next pointer all being 0.
 565	 */
 566	if (header == 0)
 567		return 0;
 568
 569	while (ttl-- > 0) {
 570		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
 571			return pos;
 572
 573		pos = PCI_EXT_CAP_NEXT(header);
 574		if (pos < PCI_CFG_SPACE_SIZE)
 575			break;
 576
 577		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 578			break;
 579	}
 580
 581	return 0;
 582}
 583EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
 584
 585/**
 586 * pci_find_ext_capability - Find an extended capability
 587 * @dev: PCI device to query
 588 * @cap: capability code
 
 589 *
 590 * Returns the address of the requested extended capability structure
 591 * within the device's PCI configuration space or 0 if the device does
 592 * not support it.  Possible values for @cap include:
 593 *
 594 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 595 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 596 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 597 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 598 */
 599u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
 
 600{
 601	return pci_find_next_ext_capability(dev, 0, cap);
 602}
 603EXPORT_SYMBOL_GPL(pci_find_ext_capability);
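/*
 * Editor's example (sketch, hypothetical variables): locate the AER extended
 * capability and read its Uncorrectable Error Status register.
 *
 *	u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	u32 uncor;
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &uncor);
 */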
 604
 605/**
 606 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 607 * @dev: PCI device to query
 608 *
 609 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 610 * Number.
 611 *
 612 * Returns the DSN, or zero if the capability does not exist.
 613 */
 614u64 pci_get_dsn(struct pci_dev *dev)
 615{
 616	u32 dword;
 617	u64 dsn;
 618	int pos;
 619
 620	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
 621	if (!pos)
 
 622		return 0;
 623
 624	/*
 625	 * The Device Serial Number is two dwords offset 4 bytes from the
 626	 * capability position. The specification says that the first dword is
 627	 * the lower half, and the second dword is the upper half.
 628	 */
 629	pos += 4;
 630	pci_read_config_dword(dev, pos, &dword);
 631	dsn = (u64)dword;
 632	pci_read_config_dword(dev, pos + 4, &dword);
 633	dsn |= ((u64)dword) << 32;
 
 634
 635	return dsn;
 636}
 637EXPORT_SYMBOL_GPL(pci_get_dsn);
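/*
 * Editor's example (sketch): drivers use the DSN as a stable identifier when
 * it is present, falling back to something else when it reads as zero.
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *
 *	if (!dsn)
 *		dsn = example_fallback_id(pdev);	// hypothetical helper
 */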
 638
 639static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
 640{
 641	int rc, ttl = PCI_FIND_CAP_TTL;
 642	u8 cap, mask;
 643
 644	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 645		mask = HT_3BIT_CAP_MASK;
 646	else
 647		mask = HT_5BIT_CAP_MASK;
 648
 649	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 650				      PCI_CAP_ID_HT, &ttl);
 651	while (pos) {
 652		rc = pci_read_config_byte(dev, pos + 3, &cap);
 653		if (rc != PCIBIOS_SUCCESSFUL)
 654			return 0;
 655
 656		if ((cap & mask) == ht_cap)
 657			return pos;
 658
 659		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 660					      pos + PCI_CAP_LIST_NEXT,
 661					      PCI_CAP_ID_HT, &ttl);
 662	}
 663
 664	return 0;
 665}
 666
 667/**
 668 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 669 * @dev: PCI device to query
 670 * @pos: Position from which to continue searching
 671 * @ht_cap: HyperTransport capability code
 672 *
 673 * To be used in conjunction with pci_find_ht_capability() to search for
 674 * all capabilities matching @ht_cap. @pos should always be a value returned
 675 * from pci_find_ht_capability().
 676 *
 677 * NB. To be 100% safe against broken PCI devices, the caller should take
 678 * steps to avoid an infinite loop.
 679 */
 680u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
 681{
 682	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 683}
 684EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 685
 686/**
 687 * pci_find_ht_capability - query a device's HyperTransport capabilities
 688 * @dev: PCI device to query
 689 * @ht_cap: HyperTransport capability code
 690 *
 691 * Tell if a device supports a given HyperTransport capability.
 692 * Returns an address within the device's PCI configuration space
  693 * or 0 in case the device does not support the requested capability.
 694 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 695 * which has a HyperTransport capability matching @ht_cap.
 696 */
 697u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 698{
 699	u8 pos;
 700
 701	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 702	if (pos)
 703		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 704
 705	return pos;
 706}
 707EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 708
 709/**
 710 * pci_find_vsec_capability - Find a vendor-specific extended capability
 711 * @dev: PCI device to query
 712 * @vendor: Vendor ID for which capability is defined
 713 * @cap: Vendor-specific capability ID
 714 *
 715 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 716 * VSEC ID @cap. If found, return the capability offset in
 717 * config space; otherwise return 0.
 718 */
 719u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
 720{
 721	u16 vsec = 0;
 722	u32 header;
 723
 724	if (vendor != dev->vendor)
 725		return 0;
 726
 727	while ((vsec = pci_find_next_ext_capability(dev, vsec,
 728						     PCI_EXT_CAP_ID_VNDR))) {
 729		if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
 730					  &header) == PCIBIOS_SUCCESSFUL &&
 731		    PCI_VNDR_HEADER_ID(header) == cap)
 732			return vsec;
 733	}
 734
 735	return 0;
 736}
 737EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
 738
 739/**
 740 * pci_find_dvsec_capability - Find DVSEC for vendor
 741 * @dev: PCI device to query
 742 * @vendor: Vendor ID to match for the DVSEC
 743 * @dvsec: Designated Vendor-specific capability ID
 744 *
 745 * If DVSEC has Vendor ID @vendor and DVSEC ID @dvsec return the capability
 746 * offset in config space; otherwise return 0.
 747 */
 748u16 pci_find_dvsec_capability(struct pci_dev *dev, u16 vendor, u16 dvsec)
 749{
 750	int pos;
 751
 752	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DVSEC);
 753	if (!pos)
 754		return 0;
 755
 756	while (pos) {
 757		u16 v, id;
 758
 759		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER1, &v);
 760		pci_read_config_word(dev, pos + PCI_DVSEC_HEADER2, &id);
 761		if (vendor == v && dvsec == id)
 762			return pos;
 763
 764		pos = pci_find_next_ext_capability(dev, pos, PCI_EXT_CAP_ID_DVSEC);
 765	}
 766
 767	return 0;
 768}
 769EXPORT_SYMBOL_GPL(pci_find_dvsec_capability);
 770
 771/**
 772 * pci_find_parent_resource - return resource region of parent bus of given
 773 *			      region
  774 * @dev: PCI device structure containing resources to be searched
 775 * @res: child resource record for which parent is sought
 776 *
 777 * For given resource region of given device, return the resource region of
 778 * parent bus the given region is contained in.
 
 779 */
 780struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 781					  struct resource *res)
 782{
 783	const struct pci_bus *bus = dev->bus;
 784	struct resource *r;
 785	int i;
 
 786
 787	pci_bus_for_each_resource(bus, r, i) {
 788		if (!r)
 789			continue;
 790		if (resource_contains(r, res)) {
 791
 792			/*
 793			 * If the window is prefetchable but the BAR is
 794			 * not, the allocator made a mistake.
 795			 */
 796			if (r->flags & IORESOURCE_PREFETCH &&
 797			    !(res->flags & IORESOURCE_PREFETCH))
 798				return NULL;
 799
 800			/*
 801			 * If we're below a transparent bridge, there may
 802			 * be both a positively-decoded aperture and a
 803			 * subtractively-decoded region that contain the BAR.
 804			 * We want the positively-decoded one, so this depends
 805			 * on pci_bus_for_each_resource() giving us those
 806			 * first.
 807			 */
 808			return r;
 809		}
 810	}
 811	return NULL;
 812}
 813EXPORT_SYMBOL(pci_find_parent_resource);
 814
 815/**
 816 * pci_find_resource - Return matching PCI device resource
 817 * @dev: PCI device to query
 818 * @res: Resource to look for
 819 *
 820 * Goes over standard PCI resources (BARs) and checks if the given resource
 821 * is partially or fully contained in any of them. In that case the
 822 * matching resource is returned, %NULL otherwise.
 823 */
 824struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
 825{
 826	int i;
 827
 828	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 829		struct resource *r = &dev->resource[i];
 830
 831		if (r->start && resource_contains(r, res))
 832			return r;
 833	}
 834
 835	return NULL;
 836}
 837EXPORT_SYMBOL(pci_find_resource);
 838
 839/**
 840 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 841 * @dev: the PCI device to operate on
 842 * @pos: config space offset of status word
 843 * @mask: mask of bit(s) to care about in status word
 844 *
 845 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 846 */
 847int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
 848{
 849	int i;
 850
  851	/* Wait for Transaction Pending bit to clear */
 852	for (i = 0; i < 4; i++) {
 853		u16 status;
 854		if (i)
 855			msleep((1 << (i - 1)) * 100);
 856
 857		pci_read_config_word(dev, pos, &status);
 858		if (!(status & mask))
 859			return 1;
 860	}
 861
 862	return 0;
 863}
 864
 865static int pci_acs_enable;
 866
 867/**
 868 * pci_request_acs - ask for ACS to be enabled if supported
 869 */
 870void pci_request_acs(void)
 871{
 872	pci_acs_enable = 1;
 873}
 874
 875static const char *disable_acs_redir_param;
 876
 877/**
 878 * pci_disable_acs_redir - disable ACS redirect capabilities
 879 * @dev: the PCI device
 880 *
 881 * For only devices specified in the disable_acs_redir parameter.
 882 */
 883static void pci_disable_acs_redir(struct pci_dev *dev)
 884{
 885	int ret = 0;
 886	const char *p;
 887	int pos;
 888	u16 ctrl;
 889
 890	if (!disable_acs_redir_param)
 891		return;
 892
 893	p = disable_acs_redir_param;
 894	while (*p) {
 895		ret = pci_dev_str_match(dev, p, &p);
 896		if (ret < 0) {
 897			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
 898				     disable_acs_redir_param);
 899
 900			break;
 901		} else if (ret == 1) {
 902			/* Found a match */
 903			break;
 904		}
 905
 906		if (*p != ';' && *p != ',') {
 907			/* End of param or invalid format */
 908			break;
 909		}
 910		p++;
 911	}
 912
 913	if (ret != 1)
 914		return;
 915
 916	if (!pci_dev_specific_disable_acs_redir(dev))
 917		return;
 918
 919	pos = dev->acs_cap;
 920	if (!pos) {
 921		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
 922		return;
 923	}
 924
 925	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
 926
 927	/* P2P Request & Completion Redirect */
 928	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
 929
 930	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
 931
 932	pci_info(dev, "disabled ACS redirect\n");
 933}
 934
 935/**
 936 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 937 * @dev: the PCI device
 938 */
 939static void pci_std_enable_acs(struct pci_dev *dev)
 940{
 941	int pos;
 942	u16 cap;
 943	u16 ctrl;
 944
 945	pos = dev->acs_cap;
 946	if (!pos)
 947		return;
 948
 949	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
 950	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
 951
 952	/* Source Validation */
 953	ctrl |= (cap & PCI_ACS_SV);
 954
 955	/* P2P Request Redirect */
 956	ctrl |= (cap & PCI_ACS_RR);
 957
 958	/* P2P Completion Redirect */
 959	ctrl |= (cap & PCI_ACS_CR);
 960
 961	/* Upstream Forwarding */
 962	ctrl |= (cap & PCI_ACS_UF);
 963
 964	/* Enable Translation Blocking for external devices and noats */
 965	if (pci_ats_disabled() || dev->external_facing || dev->untrusted)
 966		ctrl |= (cap & PCI_ACS_TB);
 967
 968	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
 969}
 970
 971/**
  972 * pci_enable_acs - enable ACS if hardware supports it
 973 * @dev: the PCI device
 974 */
 975static void pci_enable_acs(struct pci_dev *dev)
 976{
 977	if (!pci_acs_enable)
 978		goto disable_acs_redir;
 979
 980	if (!pci_dev_specific_enable_acs(dev))
 981		goto disable_acs_redir;
 982
 983	pci_std_enable_acs(dev);
 984
 985disable_acs_redir:
 986	/*
 987	 * Note: pci_disable_acs_redir() must be called even if ACS was not
 988	 * enabled by the kernel because it may have been enabled by
 989	 * platform firmware.  So if we are told to disable it, we should
 990	 * always disable it after setting the kernel's default
 991	 * preferences.
 992	 */
 993	pci_disable_acs_redir(dev);
 994}
 995
 996/**
 997 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 998 * @dev: PCI device to have its BARs restored
 999 *
1000 * Restore the BAR values for a given device, so as to make it
1001 * accessible by its driver.
1002 */
1003static void pci_restore_bars(struct pci_dev *dev)
 
1004{
1005	int i;
1006
1007	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
1008		pci_update_resource(dev, i);
1009}
1010
1011static inline bool platform_pci_power_manageable(struct pci_dev *dev)
1012{
1013	if (pci_use_mid_pm())
1014		return true;
1015
1016	return acpi_pci_power_manageable(dev);
1017}
1018
1019static inline int platform_pci_set_power_state(struct pci_dev *dev,
1020					       pci_power_t t)
1021{
1022	if (pci_use_mid_pm())
1023		return mid_pci_set_power_state(dev, t);
1024
1025	return acpi_pci_set_power_state(dev, t);
 
1026}
1027
1028static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
1029{
1030	if (pci_use_mid_pm())
1031		return mid_pci_get_power_state(dev);
1032
1033	return acpi_pci_get_power_state(dev);
1034}
1035
1036static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
 
1037{
1038	if (!pci_use_mid_pm())
1039		acpi_pci_refresh_power_state(dev);
1040}
1041
1042static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
1043{
1044	if (pci_use_mid_pm())
1045		return PCI_POWER_ERROR;
1046
1047	return acpi_pci_choose_state(dev);
1048}
1049
1050static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
1051{
1052	if (pci_use_mid_pm())
1053		return PCI_POWER_ERROR;
1054
1055	return acpi_pci_wakeup(dev, enable);
1056}
1057
1058static inline bool platform_pci_need_resume(struct pci_dev *dev)
1059{
1060	if (pci_use_mid_pm())
1061		return false;
1062
1063	return acpi_pci_need_resume(dev);
1064}
1065
1066static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
1067{
1068	if (pci_use_mid_pm())
1069		return false;
1070
1071	return acpi_pci_bridge_d3(dev);
1072}
1073
1074/**
1075 * pci_update_current_state - Read power state of given device and cache it
 
1076 * @dev: PCI device to handle.
1077 * @state: State to cache in case the device doesn't have the PM capability
1078 *
1079 * The power state is read from the PMCSR register, which however is
1080 * inaccessible in D3cold.  The platform firmware is therefore queried first
1081 * to detect accessibility of the register.  In case the platform firmware
1082 * reports an incorrect state or the device isn't power manageable by the
1083 * platform at all, we try to detect D3cold by testing accessibility of the
1084 * vendor ID in config space.
1085 */
1086void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1087{
1088	if (platform_pci_get_power_state(dev) == PCI_D3cold) {
1089		dev->current_state = PCI_D3cold;
1090	} else if (dev->pm_cap) {
1091		u16 pmcsr;
1092
1093		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1094		if (PCI_POSSIBLE_ERROR(pmcsr)) {
1095			dev->current_state = PCI_D3cold;
1096			return;
1097		}
1098		dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1099	} else {
1100		dev->current_state = state;
1101	}
1102}
1103
1104/**
1105 * pci_refresh_power_state - Refresh the given device's power state data
1106 * @dev: Target PCI device.
1107 *
 1108 * Ask the platform to refresh the device's power state information and invoke
1109 * pci_update_current_state() to update its current PCI power state.
1110 */
1111void pci_refresh_power_state(struct pci_dev *dev)
1112{
1113	platform_pci_refresh_power_state(dev);
1114	pci_update_current_state(dev, dev->current_state);
1115}
1116
1117/**
1118 * pci_platform_power_transition - Use platform to change device power state
1119 * @dev: PCI device to handle.
1120 * @state: State to put the device into.
1121 */
1122int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1123{
1124	int error;
1125
1126	error = platform_pci_set_power_state(dev, state);
1127	if (!error)
1128		pci_update_current_state(dev, state);
1129	else if (!dev->pm_cap) /* Fall back to PCI_D0 */
1130		dev->current_state = PCI_D0;
1131
1132	return error;
1133}
1134EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1135
1136static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1137{
1138	pm_request_resume(&pci_dev->dev);
1139	return 0;
1140}
1141
1142/**
1143 * pci_resume_bus - Walk given bus and runtime resume devices on it
1144 * @bus: Top bus of the subtree to walk.
1145 */
1146void pci_resume_bus(struct pci_bus *bus)
1147{
1148	if (bus)
1149		pci_walk_bus(bus, pci_resume_one, NULL);
1150}
1151
1152static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1153{
1154	int delay = 1;
1155	u32 id;
1156
1157	/*
1158	 * After reset, the device should not silently discard config
1159	 * requests, but it may still indicate that it needs more time by
1160	 * responding to them with CRS completions.  The Root Port will
1161	 * generally synthesize ~0 (PCI_ERROR_RESPONSE) data to complete
1162	 * the read (except when CRS SV is enabled and the read was for the
1163	 * Vendor ID; in that case it synthesizes 0x0001 data).
1164	 *
1165	 * Wait for the device to return a non-CRS completion.  Read the
1166	 * Command register instead of Vendor ID so we don't have to
1167	 * contend with the CRS SV value.
1168	 */
1169	pci_read_config_dword(dev, PCI_COMMAND, &id);
1170	while (PCI_POSSIBLE_ERROR(id)) {
1171		if (delay > timeout) {
1172			pci_warn(dev, "not ready %dms after %s; giving up\n",
1173				 delay - 1, reset_type);
1174			return -ENOTTY;
1175		}
1176
1177		if (delay > 1000)
1178			pci_info(dev, "not ready %dms after %s; waiting\n",
1179				 delay - 1, reset_type);
1180
1181		msleep(delay);
1182		delay *= 2;
1183		pci_read_config_dword(dev, PCI_COMMAND, &id);
1184	}
1185
1186	if (delay > 1000)
1187		pci_info(dev, "ready %dms after %s\n", delay - 1,
1188			 reset_type);
1189
1190	return 0;
1191}
1192
1193/**
1194 * pci_power_up - Put the given device into D0
1195 * @dev: PCI device to power up
1196 *
1197 * On success, return 0 or 1, depending on whether or not it is necessary to
1198 * restore the device's BARs subsequently (1 is returned in that case).
1199 */
1200int pci_power_up(struct pci_dev *dev)
1201{
1202	bool need_restore;
1203	pci_power_t state;
1204	u16 pmcsr;
1205
1206	platform_pci_set_power_state(dev, PCI_D0);
1207
1208	if (!dev->pm_cap) {
1209		state = platform_pci_get_power_state(dev);
1210		if (state == PCI_UNKNOWN)
1211			dev->current_state = PCI_D0;
1212		else
1213			dev->current_state = state;
1214
1215		if (state == PCI_D0)
1216			return 0;
1217
1218		return -EIO;
1219	}
1220
1221	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1222	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1223		pci_err(dev, "Unable to change power state from %s to D0, device inaccessible\n",
1224			pci_power_name(dev->current_state));
1225		dev->current_state = PCI_D3cold;
1226		return -EIO;
1227	}
1228
1229	state = pmcsr & PCI_PM_CTRL_STATE_MASK;
 
1230
1231	need_restore = (state == PCI_D3hot || dev->current_state >= PCI_D3hot) &&
1232			!(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET);
1233
1234	if (state == PCI_D0)
1235		goto end;
1236
1237	/*
1238	 * Force the entire word to 0. This doesn't affect PME_Status, disables
1239	 * PME_En, and sets PowerState to 0.
1240	 */
1241	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, 0);
1242
1243	/* Mandatory transition delays; see PCI PM 1.2. */
1244	if (state == PCI_D3hot)
1245		pci_dev_d3_sleep(dev);
1246	else if (state == PCI_D2)
1247		udelay(PCI_PM_D2_DELAY);
1248
1249end:
1250	dev->current_state = PCI_D0;
1251	if (need_restore)
1252		return 1;
 
 
 
1253
1254	return 0;
1255}
1256
1257/**
1258 * pci_set_full_power_state - Put a PCI device into D0 and update its state
1259 * @dev: PCI device to power up
1260 *
1261 * Call pci_power_up() to put @dev into D0, read from its PCI_PM_CTRL register
1262 * to confirm the state change, restore its BARs if they might be lost and
 1263 * reconfigure ASPM in accordance with the new power state.
1264 *
1265 * If pci_restore_state() is going to be called right after a power state change
1266 * to D0, it is more efficient to use pci_power_up() directly instead of this
1267 * function.
1268 */
1269static int pci_set_full_power_state(struct pci_dev *dev)
1270{
1271	u16 pmcsr;
1272	int ret;
1273
1274	ret = pci_power_up(dev);
1275	if (ret < 0)
1276		return ret;
1277
1278	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1279	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1280	if (dev->current_state != PCI_D0) {
1281		pci_info_ratelimited(dev, "Refused to change power state from %s to D0\n",
1282				     pci_power_name(dev->current_state));
1283	} else if (ret > 0) {
1284		/*
1285		 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1286		 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1287		 * from D3hot to D0 _may_ perform an internal reset, thereby
1288		 * going to "D0 Uninitialized" rather than "D0 Initialized".
1289		 * For example, at least some versions of the 3c905B and the
1290		 * 3c556B exhibit this behaviour.
1291		 *
1292		 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1293		 * devices in a D3hot state at boot.  Consequently, we need to
1294		 * restore at least the BARs so that the device will be
1295		 * accessible to its driver.
1296		 */
1297		pci_restore_bars(dev);
1298	}
1299
1300	return 0;
1301}
1302
1303/**
1304 * __pci_dev_set_current_state - Set current state of a PCI device
1305 * @dev: Device to handle
1306 * @data: pointer to state to be set
1307 */
1308static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1309{
1310	pci_power_t state = *(pci_power_t *)data;
1311
1312	dev->current_state = state;
1313	return 0;
1314}
1315
1316/**
1317 * pci_bus_set_current_state - Walk given bus and set current state of devices
1318 * @bus: Top bus of the subtree to walk.
1319 * @state: state to be set
1320 */
1321void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1322{
1323	if (bus)
1324		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1325}
1326
1327/**
1328 * pci_set_low_power_state - Put a PCI device into a low-power state.
1329 * @dev: PCI device to handle.
1330 * @state: PCI power state (D1, D2, D3hot) to put the device into.
1331 *
1332 * Use the device's PCI_PM_CTRL register to put it into a low-power state.
1333 *
1334 * RETURN VALUE:
1335 * -EINVAL if the requested state is invalid.
1336 * -EIO if device does not support PCI PM or its PM capabilities register has a
1337 * wrong version, or device doesn't support the requested state.
1338 * 0 if device already is in the requested state.
1339 * 0 if device's power state has been successfully changed.
1340 */
1341static int pci_set_low_power_state(struct pci_dev *dev, pci_power_t state)
1342{
1343	u16 pmcsr;
1344
1345	if (!dev->pm_cap)
1346		return -EIO;
1347
1348	/*
1349	 * Validate transition: We can enter D0 from any state, but if
1350	 * we're already in a low-power state, we can only go deeper.  E.g.,
1351	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1352	 * we'd have to go from D3 to D0, then to D1.
1353	 */
1354	if (dev->current_state <= PCI_D3cold && dev->current_state > state) {
1355		pci_dbg(dev, "Invalid power transition (from %s to %s)\n",
1356			pci_power_name(dev->current_state),
1357			pci_power_name(state));
1358		return -EINVAL;
1359	}
1360
1361	/* Check if this device supports the desired state */
1362	if ((state == PCI_D1 && !dev->d1_support)
1363	   || (state == PCI_D2 && !dev->d2_support))
1364		return -EIO;
1365
1366	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1367	if (PCI_POSSIBLE_ERROR(pmcsr)) {
1368		pci_err(dev, "Unable to change power state from %s to %s, device inaccessible\n",
1369			pci_power_name(dev->current_state),
1370			pci_power_name(state));
1371		dev->current_state = PCI_D3cold;
1372		return -EIO;
1373	}
1374
1375	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1376	pmcsr |= state;
1377
1378	/* Enter specified state */
1379	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1380
1381	/* Mandatory power management transition delays; see PCI PM 1.2. */
1382	if (state == PCI_D3hot)
1383		pci_dev_d3_sleep(dev);
1384	else if (state == PCI_D2)
1385		udelay(PCI_PM_D2_DELAY);
1386
1387	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1388	dev->current_state = pmcsr & PCI_PM_CTRL_STATE_MASK;
1389	if (dev->current_state != state)
1390		pci_info_ratelimited(dev, "Refused to change power state from %s to %s\n",
1391				     pci_power_name(dev->current_state),
1392				     pci_power_name(state));
1393
1394	return 0;
1395}
 
1396
1397/**
1398 * pci_set_power_state - Set the power state of a PCI device
1399 * @dev: PCI device to handle.
1400 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1401 *
1402 * Transition a device to a new power state, using the platform firmware and/or
1403 * the device's PCI PM registers.
1404 *
1405 * RETURN VALUE:
1406 * -EINVAL if the requested state is invalid.
1407 * -EIO if device does not support PCI PM or its PM capabilities register has a
1408 * wrong version, or device doesn't support the requested state.
1409 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1410 * 0 if device already is in the requested state.
1411 * 0 if the transition is to D3 but D3 is not supported.
1412 * 0 if device's power state has been successfully changed.
1413 */
1414int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1415{
1416	int error;
1417
1418	/* Bound the state we're entering */
1419	if (state > PCI_D3cold)
1420		state = PCI_D3cold;
1421	else if (state < PCI_D0)
1422		state = PCI_D0;
1423	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1424
1425		/*
1426		 * If the device or the parent bridge do not support PCI
1427		 * PM, ignore the request if we're doing anything other
1428		 * than putting it into D0 (which would only happen on
1429		 * boot).
1430		 */
1431		return 0;
1432
1433	/* Check if we're already there */
1434	if (dev->current_state == state)
1435		return 0;
1436
1437	if (state == PCI_D0)
1438		return pci_set_full_power_state(dev);
1439
1440	/*
1441	 * This device is quirked not to be put into D3, so don't try to
1442	 * put it into D3.
1443	 */
1444	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1445		return 0;
1446
1447	if (state == PCI_D3cold) {
1448		/*
1449		 * To put the device in D3cold, put it into D3hot in the native
1450		 * way, then put it into D3cold using platform ops.
1451		 */
1452		error = pci_set_low_power_state(dev, PCI_D3hot);
1453
1454		if (pci_platform_power_transition(dev, PCI_D3cold))
1455			return error;
1456
1457		/* Powering off a bridge may power off the whole hierarchy */
1458		if (dev->current_state == PCI_D3cold)
1459			pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
1460	} else {
1461		error = pci_set_low_power_state(dev, state);
1462
1463		if (pci_platform_power_transition(dev, state))
1464			return error;
1465	}
1466
1467	return 0;
1468}
1469EXPORT_SYMBOL(pci_set_power_state);
1470
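/*
 * Illustrative sketch (not part of this file): the simplest consumer of
 * pci_set_power_state() is a driver's .shutdown() callback that parks the
 * device in D3hot.  The callback and the quiesce helper are hypothetical.
 */
#if 0	/* example only */
static void foo_shutdown(struct pci_dev *pdev)
{
	foo_stop_dma(pdev);		/* hypothetical device quiesce */
	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
}
#endif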
1471#define PCI_EXP_SAVE_REGS	7
1472
1473static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1474						       u16 cap, bool extended)
1475{
1476	struct pci_cap_saved_state *tmp;
1477
1478	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1479		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1480			return tmp;
1481	}
1482	return NULL;
1483}
1484
1485struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1486{
1487	return _pci_find_saved_cap(dev, cap, false);
1488}
1489
1490struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1491{
1492	return _pci_find_saved_cap(dev, cap, true);
1493}
1494
1495static int pci_save_pcie_state(struct pci_dev *dev)
1496{
1497	int i = 0;
1498	struct pci_cap_saved_state *save_state;
1499	u16 *cap;
1500
1501	if (!pci_is_pcie(dev))
1502		return 0;
1503
1504	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1505	if (!save_state) {
1506		pci_err(dev, "buffer not found in %s\n", __func__);
1507		return -ENOMEM;
1508	}
1509
1510	cap = (u16 *)&save_state->cap.data[0];
1511	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1512	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1513	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1514	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1515	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1516	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1517	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1518
1519	return 0;
1520}
1521
1522void pci_bridge_reconfigure_ltr(struct pci_dev *dev)
1523{
1524#ifdef CONFIG_PCIEASPM
1525	struct pci_dev *bridge;
1526	u32 ctl;
1527
1528	bridge = pci_upstream_bridge(dev);
1529	if (bridge && bridge->ltr_path) {
1530		pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2, &ctl);
1531		if (!(ctl & PCI_EXP_DEVCTL2_LTR_EN)) {
1532			pci_dbg(bridge, "re-enabling LTR\n");
1533			pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
1534						 PCI_EXP_DEVCTL2_LTR_EN);
1535		}
1536	}
1537#endif
1538}
1539
1540static void pci_restore_pcie_state(struct pci_dev *dev)
1541{
1542	int i = 0;
1543	struct pci_cap_saved_state *save_state;
1544	u16 *cap;
1545
1546	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1547	if (!save_state)
1548		return;
1549
1550	/*
1551	 * Downstream ports reset the LTR enable bit when link goes down.
1552	 * Check and re-configure the bit here before restoring device.
1553	 * PCIe r5.0, sec 7.5.3.16.
1554	 */
1555	pci_bridge_reconfigure_ltr(dev);
1556
1557	cap = (u16 *)&save_state->cap.data[0];
1558	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1559	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1560	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1561	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1562	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1563	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1564	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1565}
1566
1567static int pci_save_pcix_state(struct pci_dev *dev)
1568{
1569	int pos;
1570	struct pci_cap_saved_state *save_state;
1571
1572	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1573	if (!pos)
1574		return 0;
1575
1576	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1577	if (!save_state) {
1578		pci_err(dev, "buffer not found in %s\n", __func__);
1579		return -ENOMEM;
1580	}
1581
1582	pci_read_config_word(dev, pos + PCI_X_CMD,
1583			     (u16 *)save_state->cap.data);
1584
1585	return 0;
1586}
1587
1588static void pci_restore_pcix_state(struct pci_dev *dev)
1589{
1590	int i = 0, pos;
1591	struct pci_cap_saved_state *save_state;
1592	u16 *cap;
1593
1594	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1595	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1596	if (!save_state || !pos)
1597		return;
1598	cap = (u16 *)&save_state->cap.data[0];
1599
1600	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1601}
1602
1603static void pci_save_ltr_state(struct pci_dev *dev)
1604{
1605	int ltr;
1606	struct pci_cap_saved_state *save_state;
1607	u32 *cap;
1608
1609	if (!pci_is_pcie(dev))
1610		return;
1611
1612	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1613	if (!ltr)
1614		return;
1615
1616	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1617	if (!save_state) {
1618		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1619		return;
1620	}
1621
1622	/* Some broken devices only support dword access to LTR */
1623	cap = &save_state->cap.data[0];
1624	pci_read_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap);
1625}
1626
1627static void pci_restore_ltr_state(struct pci_dev *dev)
1628{
1629	struct pci_cap_saved_state *save_state;
1630	int ltr;
1631	u32 *cap;
1632
1633	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1634	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1635	if (!save_state || !ltr)
1636		return;
1637
1638	/* Some broken devices only support dword access to LTR */
1639	cap = &save_state->cap.data[0];
1640	pci_write_config_dword(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap);
1641}
1642
1643/**
1644 * pci_save_state - save the PCI configuration space of a device before
1645 *		    suspending
1646 * @dev: PCI device that we're dealing with
1647 */
1648int pci_save_state(struct pci_dev *dev)
1649{
1650	int i;
1651	/* XXX: 100% dword access ok here? */
1652	for (i = 0; i < 16; i++) {
1653		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1654		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1655			i * 4, dev->saved_config_space[i]);
1656	}
1657	dev->state_saved = true;
1658
1659	i = pci_save_pcie_state(dev);
1660	if (i != 0)
1661		return i;
1662
1663	i = pci_save_pcix_state(dev);
1664	if (i != 0)
1665		return i;
1666
1667	pci_save_ltr_state(dev);
1668	pci_save_dpc_state(dev);
1669	pci_save_aer_state(dev);
1670	pci_save_ptm_state(dev);
1671	return pci_save_vc_state(dev);
1672}
1673EXPORT_SYMBOL(pci_save_state);
1674
1675static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1676				     u32 saved_val, int retry, bool force)
1677{
1678	u32 val;
1679
1680	pci_read_config_dword(pdev, offset, &val);
1681	if (!force && val == saved_val)
1682		return;
1683
1684	for (;;) {
1685		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1686			offset, val, saved_val);
1687		pci_write_config_dword(pdev, offset, saved_val);
1688		if (retry-- <= 0)
1689			return;
1690
1691		pci_read_config_dword(pdev, offset, &val);
1692		if (val == saved_val)
1693			return;
1694
1695		mdelay(1);
1696	}
1697}
1698
1699static void pci_restore_config_space_range(struct pci_dev *pdev,
1700					   int start, int end, int retry,
1701					   bool force)
1702{
1703	int index;
1704
1705	for (index = end; index >= start; index--)
1706		pci_restore_config_dword(pdev, 4 * index,
1707					 pdev->saved_config_space[index],
1708					 retry, force);
1709}
1710
1711static void pci_restore_config_space(struct pci_dev *pdev)
1712{
1713	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1714		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1715		/* Restore BARs before the command register. */
1716		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1717		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1718	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1719		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1720
1721		/*
1722		 * Force rewriting of prefetch registers to avoid S3 resume
1723		 * issues on Intel PCI bridges that occur when these
1724		 * registers are not explicitly written.
1725		 */
1726		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1727		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1728	} else {
1729		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1730	}
1731}
1732
1733static void pci_restore_rebar_state(struct pci_dev *pdev)
1734{
1735	unsigned int pos, nbars, i;
1736	u32 ctrl;
1737
1738	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1739	if (!pos)
1740		return;
1741
1742	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1743	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1744		    PCI_REBAR_CTRL_NBAR_SHIFT;
1745
1746	for (i = 0; i < nbars; i++, pos += 8) {
1747		struct resource *res;
1748		int bar_idx, size;
1749
1750		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1751		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1752		res = pdev->resource + bar_idx;
1753		size = pci_rebar_bytes_to_size(resource_size(res));
1754		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1755		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1756		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1757	}
1758}
1759
1760/**
1761 * pci_restore_state - Restore the saved state of a PCI device
1762 * @dev: PCI device that we're dealing with
1763 */
1764void pci_restore_state(struct pci_dev *dev)
1765{
1766	if (!dev->state_saved)
1767		return;
1768
1769	/*
1770	 * Restore max latencies (in the LTR capability) before enabling
1771	 * LTR itself (in the PCIe capability).
1772	 */
1773	pci_restore_ltr_state(dev);
1774
1775	pci_restore_pcie_state(dev);
1776	pci_restore_pasid_state(dev);
1777	pci_restore_pri_state(dev);
1778	pci_restore_ats_state(dev);
1779	pci_restore_vc_state(dev);
1780	pci_restore_rebar_state(dev);
1781	pci_restore_dpc_state(dev);
1782	pci_restore_ptm_state(dev);
1783
1784	pci_aer_clear_status(dev);
1785	pci_restore_aer_state(dev);
1786
1787	pci_restore_config_space(dev);
1788
1789	pci_restore_pcix_state(dev);
1790	pci_restore_msi_state(dev);
1791
1792	/* Restore ACS and IOV configuration state */
1793	pci_enable_acs(dev);
1794	pci_restore_iov_state(dev);
1795
1796	dev->state_saved = false;
1797}
1798EXPORT_SYMBOL(pci_restore_state);
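
/*
 * Illustrative sketch (not part of this file): a driver saving config
 * space before a device-specific reset and restoring it afterwards.
 * foo_trigger_device_reset() and the settle delay are hypothetical.
 */
#if 0	/* example only */
static int foo_soft_reset(struct pci_dev *pdev)
{
	int ret;

	ret = pci_save_state(pdev);
	if (ret)
		return ret;

	foo_trigger_device_reset(pdev);	/* hypothetical register poke */
	msleep(100);			/* assumed settle time */

	/* No-op unless state was saved above; restores BARs, PCIe caps, etc. */
	pci_restore_state(pdev);
	return 0;
}
#endif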
1799
1800struct pci_saved_state {
1801	u32 config_space[16];
1802	struct pci_cap_saved_data cap[];
1803};
1804
1805/**
1806 * pci_store_saved_state - Allocate and return an opaque struct containing
1807 *			   the device saved state.
1808 * @dev: PCI device that we're dealing with
1809 *
1810 * Return NULL if no state or error.
1811 */
1812struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1813{
1814	struct pci_saved_state *state;
1815	struct pci_cap_saved_state *tmp;
1816	struct pci_cap_saved_data *cap;
1817	size_t size;
1818
1819	if (!dev->state_saved)
1820		return NULL;
1821
1822	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1823
1824	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1825		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1826
1827	state = kzalloc(size, GFP_KERNEL);
1828	if (!state)
1829		return NULL;
1830
1831	memcpy(state->config_space, dev->saved_config_space,
1832	       sizeof(state->config_space));
1833
1834	cap = state->cap;
1835	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1836		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1837		memcpy(cap, &tmp->cap, len);
1838		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1839	}
1840	/* Empty cap_save terminates list */
1841
1842	return state;
1843}
1844EXPORT_SYMBOL_GPL(pci_store_saved_state);
1845
1846/**
1847 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1848 * @dev: PCI device that we're dealing with
1849 * @state: Saved state returned from pci_store_saved_state()
1850 */
1851int pci_load_saved_state(struct pci_dev *dev,
1852			 struct pci_saved_state *state)
1853{
1854	struct pci_cap_saved_data *cap;
1855
1856	dev->state_saved = false;
1857
1858	if (!state)
1859		return 0;
1860
1861	memcpy(dev->saved_config_space, state->config_space,
1862	       sizeof(state->config_space));
1863
1864	cap = state->cap;
1865	while (cap->size) {
1866		struct pci_cap_saved_state *tmp;
1867
1868		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1869		if (!tmp || tmp->cap.size != cap->size)
1870			return -EINVAL;
1871
1872		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1873		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1874		       sizeof(struct pci_cap_saved_data) + cap->size);
1875	}
1876
1877	dev->state_saved = true;
1878	return 0;
1879}
1880EXPORT_SYMBOL_GPL(pci_load_saved_state);
1881
1882/**
1883 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1884 *				   and free the memory allocated for it.
1885 * @dev: PCI device that we're dealing with
1886 * @state: Pointer to saved state returned from pci_store_saved_state()
1887 */
1888int pci_load_and_free_saved_state(struct pci_dev *dev,
1889				  struct pci_saved_state **state)
1890{
1891	int ret = pci_load_saved_state(dev, *state);
1892	kfree(*state);
1893	*state = NULL;
1894	return ret;
1895}
1896EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
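
/*
 * Illustrative sketch (not part of this file): keeping an opaque copy of
 * the saved state so it can be reloaded later, e.g. after a guest-driven
 * reset.  The foo_ctx structure and helpers are hypothetical.
 */
#if 0	/* example only */
struct foo_ctx {
	struct pci_dev *pdev;
	struct pci_saved_state *saved;
};

static void foo_capture_state(struct foo_ctx *ctx)
{
	pci_save_state(ctx->pdev);
	ctx->saved = pci_store_saved_state(ctx->pdev);
}

static void foo_reload_state(struct foo_ctx *ctx)
{
	/* Reloads the copy, then frees it and clears the pointer */
	pci_load_and_free_saved_state(ctx->pdev, &ctx->saved);
	pci_restore_state(ctx->pdev);
}
#endif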
1897
1898int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1899{
1900	return pci_enable_resources(dev, bars);
1901}
1902
1903static int do_pci_enable_device(struct pci_dev *dev, int bars)
1904{
1905	int err;
1906	struct pci_dev *bridge;
1907	u16 cmd;
1908	u8 pin;
1909
1910	err = pci_set_power_state(dev, PCI_D0);
1911	if (err < 0 && err != -EIO)
1912		return err;
1913
1914	bridge = pci_upstream_bridge(dev);
1915	if (bridge)
1916		pcie_aspm_powersave_config_link(bridge);
1917
1918	err = pcibios_enable_device(dev, bars);
1919	if (err < 0)
1920		return err;
1921	pci_fixup_device(pci_fixup_enable, dev);
1922
1923	if (dev->msi_enabled || dev->msix_enabled)
1924		return 0;
1925
1926	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1927	if (pin) {
1928		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1929		if (cmd & PCI_COMMAND_INTX_DISABLE)
1930			pci_write_config_word(dev, PCI_COMMAND,
1931					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1932	}
1933
1934	return 0;
1935}
1936
1937/**
1938 * pci_reenable_device - Resume abandoned device
1939 * @dev: PCI device to be resumed
1940 *
1941 * NOTE: This function is a backend of pci_default_resume() and is not supposed
1942 * to be called by normal code; write a proper resume handler and use that instead.
1943 */
1944int pci_reenable_device(struct pci_dev *dev)
1945{
1946	if (pci_is_enabled(dev))
1947		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1948	return 0;
1949}
1950EXPORT_SYMBOL(pci_reenable_device);
1951
1952static void pci_enable_bridge(struct pci_dev *dev)
1953{
1954	struct pci_dev *bridge;
1955	int retval;
1956
1957	bridge = pci_upstream_bridge(dev);
1958	if (bridge)
1959		pci_enable_bridge(bridge);
1960
1961	if (pci_is_enabled(dev)) {
1962		if (!dev->is_busmaster)
1963			pci_set_master(dev);
1964		return;
1965	}
1966
1967	retval = pci_enable_device(dev);
1968	if (retval)
1969		pci_err(dev, "Error enabling bridge (%d), continuing\n",
1970			retval);
1971	pci_set_master(dev);
1972}
1973
1974static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1975{
1976	struct pci_dev *bridge;
1977	int err;
1978	int i, bars = 0;
1979
1980	/*
1981	 * Power state could be unknown at this point, either due to a fresh
1982	 * boot or a device removal call.  So get the current power state
1983	 * so that things like MSI message writing will behave as expected
1984	 * (e.g. if the device really is in D0 at enable time).
1985	 */
1986	pci_update_current_state(dev, dev->current_state);
1987
1988	if (atomic_inc_return(&dev->enable_cnt) > 1)
1989		return 0;		/* already enabled */
1990
1991	bridge = pci_upstream_bridge(dev);
1992	if (bridge)
1993		pci_enable_bridge(bridge);
1994
1995	/* Collect all matching resources except the SR-IOV BARs */
1996	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1997		if (dev->resource[i].flags & flags)
1998			bars |= (1 << i);
1999	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
2000		if (dev->resource[i].flags & flags)
2001			bars |= (1 << i);
2002
2003	err = do_pci_enable_device(dev, bars);
2004	if (err < 0)
2005		atomic_dec(&dev->enable_cnt);
2006	return err;
2007}
2008
2009/**
2010 * pci_enable_device_io - Initialize a device for use with IO space
2011 * @dev: PCI device to be initialized
2012 *
2013 * Initialize device before it's used by a driver. Ask low-level code
2014 * to enable I/O resources. Wake up the device if it was suspended.
2015 * Beware, this function can fail.
2016 */
2017int pci_enable_device_io(struct pci_dev *dev)
2018{
2019	return pci_enable_device_flags(dev, IORESOURCE_IO);
2020}
2021EXPORT_SYMBOL(pci_enable_device_io);
2022
2023/**
2024 * pci_enable_device_mem - Initialize a device for use with Memory space
2025 * @dev: PCI device to be initialized
2026 *
2027 * Initialize device before it's used by a driver. Ask low-level code
2028 * to enable Memory resources. Wake up the device if it was suspended.
2029 * Beware, this function can fail.
2030 */
2031int pci_enable_device_mem(struct pci_dev *dev)
2032{
2033	return pci_enable_device_flags(dev, IORESOURCE_MEM);
2034}
2035EXPORT_SYMBOL(pci_enable_device_mem);
2036
2037/**
2038 * pci_enable_device - Initialize device before it's used by a driver.
2039 * @dev: PCI device to be initialized
2040 *
2041 * Initialize device before it's used by a driver. Ask low-level code
2042 * to enable I/O and memory. Wake up the device if it was suspended.
2043 * Beware, this function can fail.
2044 *
2045 * Note we don't actually enable the device many times if we call
2046 * this function repeatedly (we just increment the count).
2047 */
2048int pci_enable_device(struct pci_dev *dev)
2049{
2050	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
2051}
2052EXPORT_SYMBOL(pci_enable_device);
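
/*
 * Illustrative sketch (not part of this file): the classic unmanaged
 * probe/remove pairing around pci_enable_device().  The driver name and
 * the "foo" region label are hypothetical.
 */
#if 0	/* example only */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, "foo");
	if (ret) {
		pci_disable_device(pdev);
		return ret;
	}

	pci_set_master(pdev);
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
#endif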
2053
2054/*
2055 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
2056 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
2057 * there's no need to track it separately.  pci_devres is initialized
2058 * when a device is enabled using managed PCI device enable interface.
2059 */
2060struct pci_devres {
2061	unsigned int enabled:1;
2062	unsigned int pinned:1;
2063	unsigned int orig_intx:1;
2064	unsigned int restore_intx:1;
2065	unsigned int mwi:1;
2066	u32 region_mask;
2067};
2068
2069static void pcim_release(struct device *gendev, void *res)
2070{
2071	struct pci_dev *dev = to_pci_dev(gendev);
2072	struct pci_devres *this = res;
2073	int i;
2074
2075	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2076		if (this->region_mask & (1 << i))
2077			pci_release_region(dev, i);
2078
2079	if (this->mwi)
2080		pci_clear_mwi(dev);
2081
2082	if (this->restore_intx)
2083		pci_intx(dev, this->orig_intx);
2084
2085	if (this->enabled && !this->pinned)
2086		pci_disable_device(dev);
2087}
2088
2089static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2090{
2091	struct pci_devres *dr, *new_dr;
2092
2093	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2094	if (dr)
2095		return dr;
2096
2097	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2098	if (!new_dr)
2099		return NULL;
2100	return devres_get(&pdev->dev, new_dr, NULL, NULL);
2101}
2102
2103static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2104{
2105	if (pci_is_managed(pdev))
2106		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2107	return NULL;
2108}
2109
2110/**
2111 * pcim_enable_device - Managed pci_enable_device()
2112 * @pdev: PCI device to be initialized
2113 *
2114 * Managed pci_enable_device().
2115 */
2116int pcim_enable_device(struct pci_dev *pdev)
2117{
2118	struct pci_devres *dr;
2119	int rc;
2120
2121	dr = get_pci_dr(pdev);
2122	if (unlikely(!dr))
2123		return -ENOMEM;
2124	if (dr->enabled)
2125		return 0;
2126
2127	rc = pci_enable_device(pdev);
2128	if (!rc) {
2129		pdev->is_managed = 1;
2130		dr->enabled = 1;
2131	}
2132	return rc;
2133}
2134EXPORT_SYMBOL(pcim_enable_device);
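
/*
 * Illustrative sketch (not part of this file): with the managed variant,
 * pcim_release() disables the device automatically on driver detach, so
 * probe() needs no matching disable.  The driver name is hypothetical.
 */
#if 0	/* example only */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	pci_set_master(pdev);
	return 0;	/* no pci_disable_device() needed in remove() */
}
#endif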
2135
2136/**
2137 * pcim_pin_device - Pin managed PCI device
2138 * @pdev: PCI device to pin
2139 *
2140 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
2141 * driver detach.  @pdev must have been enabled with
2142 * pcim_enable_device().
2143 */
2144void pcim_pin_device(struct pci_dev *pdev)
2145{
2146	struct pci_devres *dr;
2147
2148	dr = find_pci_dr(pdev);
2149	WARN_ON(!dr || !dr->enabled);
2150	if (dr)
2151		dr->pinned = 1;
2152}
2153EXPORT_SYMBOL(pcim_pin_device);
2154
2155/*
2156 * pcibios_device_add - provide arch specific hooks when adding device dev
2157 * @dev: the PCI device being added
2158 *
2159 * Permits the platform to provide architecture specific functionality when
2160 * devices are added. This is the default implementation. Architecture
2161 * implementations can override this.
2162 */
2163int __weak pcibios_device_add(struct pci_dev *dev)
2164{
2165	return 0;
2166}
2167
2168/**
2169 * pcibios_release_device - provide arch specific hooks when releasing
2170 *			    device dev
2171 * @dev: the PCI device being released
2172 *
2173 * Permits the platform to provide architecture specific functionality when
2174 * devices are released. This is the default implementation. Architecture
2175 * implementations can override this.
2176 */
2177void __weak pcibios_release_device(struct pci_dev *dev) {}
2178
2179/**
2180 * pcibios_disable_device - disable arch specific PCI resources for device dev
2181 * @dev: the PCI device to disable
2182 *
2183 * Disables architecture specific PCI resources for the device. This
2184 * is the default implementation. Architecture implementations can
2185 * override this.
2186 */
2187void __weak pcibios_disable_device(struct pci_dev *dev) {}
2188
2189/**
2190 * pcibios_penalize_isa_irq - penalize an ISA IRQ
2191 * @irq: ISA IRQ to penalize
2192 * @active: IRQ active or not
2193 *
2194 * Permits the platform to provide architecture-specific functionality when
2195 * penalizing ISA IRQs. This is the default implementation. Architecture
2196 * implementations can override this.
2197 */
2198void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2199
2200static void do_pci_disable_device(struct pci_dev *dev)
2201{
2202	u16 pci_command;
2203
2204	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2205	if (pci_command & PCI_COMMAND_MASTER) {
2206		pci_command &= ~PCI_COMMAND_MASTER;
2207		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2208	}
2209
2210	pcibios_disable_device(dev);
2211}
2212
2213/**
2214 * pci_disable_enabled_device - Disable device without updating enable_cnt
2215 * @dev: PCI device to disable
2216 *
2217 * NOTE: This function is a backend of PCI power management routines and is
2218 * not supposed to be called by drivers.
2219 */
2220void pci_disable_enabled_device(struct pci_dev *dev)
2221{
2222	if (pci_is_enabled(dev))
2223		do_pci_disable_device(dev);
2224}
2225
2226/**
2227 * pci_disable_device - Disable PCI device after use
2228 * @dev: PCI device to be disabled
2229 *
2230 * Signal to the system that the PCI device is no longer in use.  This
2231 * only involves disabling PCI bus-mastering, if active.
2232 *
2233 * Note we don't actually disable the device until all callers of
2234 * pci_enable_device() have called pci_disable_device().
2235 */
2236void pci_disable_device(struct pci_dev *dev)
2237{
2238	struct pci_devres *dr;
2239
2240	dr = find_pci_dr(dev);
2241	if (dr)
2242		dr->enabled = 0;
2243
2244	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2245		      "disabling already-disabled device");
2246
2247	if (atomic_dec_return(&dev->enable_cnt) != 0)
2248		return;
2249
2250	do_pci_disable_device(dev);
2251
2252	dev->is_busmaster = 0;
2253}
2254EXPORT_SYMBOL(pci_disable_device);
2255
2256/**
2257 * pcibios_set_pcie_reset_state - set reset state for device dev
2258 * @dev: the PCIe device to reset
2259 * @state: Reset state to enter into
2260 *
2261 * Set the PCIe reset state for the device. This is the default
2262 * implementation. Architecture implementations can override this.
2263 */
2264int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2265					enum pcie_reset_state state)
2266{
2267	return -EINVAL;
2268}
2269
2270/**
2271 * pci_set_pcie_reset_state - set reset state for device dev
2272 * @dev: the PCIe device to reset
2273 * @state: Reset state to enter into
2274 *
2275 * Sets the PCIe reset state for the device.
2276 */
2277int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2278{
2279	return pcibios_set_pcie_reset_state(dev, state);
2280}
2281EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2282
2283#ifdef CONFIG_PCIEAER
2284void pcie_clear_device_status(struct pci_dev *dev)
2285{
2286	u16 sta;
2287
2288	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2289	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2290}
2291#endif
2292
2293/**
2294 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2295 * @dev: PCIe root port or event collector.
2296 */
2297void pcie_clear_root_pme_status(struct pci_dev *dev)
2298{
2299	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2300}
2301
2302/**
2303 * pci_check_pme_status - Check if given device has generated PME.
2304 * @dev: Device to check.
2305 *
2306 * Check the PME status of the device and if set, clear it and clear PME enable
2307 * (if set).  Return 'true' if PME status and PME enable were both set or
2308 * 'false' otherwise.
2309 */
2310bool pci_check_pme_status(struct pci_dev *dev)
2311{
2312	int pmcsr_pos;
2313	u16 pmcsr;
2314	bool ret = false;
2315
2316	if (!dev->pm_cap)
2317		return false;
2318
2319	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2320	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2321	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2322		return false;
2323
2324	/* Clear PME status. */
2325	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2326	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2327		/* Disable PME to avoid interrupt flood. */
2328		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2329		ret = true;
2330	}
2331
2332	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2333
2334	return ret;
2335}
2336
2337/**
2338 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2339 * @dev: Device to handle.
2340 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2341 *
2342 * Check if @dev has generated PME and queue a resume request for it in that
2343 * case.
2344 */
2345static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2346{
2347	if (pme_poll_reset && dev->pme_poll)
2348		dev->pme_poll = false;
2349
2350	if (pci_check_pme_status(dev)) {
2351		pci_wakeup_event(dev);
2352		pm_request_resume(&dev->dev);
2353	}
2354	return 0;
2355}
2356
2357/**
2358 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2359 * @bus: Top bus of the subtree to walk.
2360 */
2361void pci_pme_wakeup_bus(struct pci_bus *bus)
2362{
2363	if (bus)
2364		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2365}
2366
2367
2368/**
2369 * pci_pme_capable - check the capability of PCI device to generate PME#
2370 * @dev: PCI device to handle.
2371 * @state: PCI state from which device will issue PME#.
2372 */
2373bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2374{
2375	if (!dev->pm_cap)
2376		return false;
2377
2378	return !!(dev->pme_support & (1 << state));
2379}
2380EXPORT_SYMBOL(pci_pme_capable);
2381
2382static void pci_pme_list_scan(struct work_struct *work)
2383{
2384	struct pci_pme_device *pme_dev, *n;
2385
2386	mutex_lock(&pci_pme_list_mutex);
2387	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2388		if (pme_dev->dev->pme_poll) {
2389			struct pci_dev *bridge;
2390
2391			bridge = pme_dev->dev->bus->self;
2392			/*
2393			 * If the bridge is in a low-power state, the
2394			 * configuration space of its subordinate devices
2395			 * may not be accessible.
2396			 */
2397			if (bridge && bridge->current_state != PCI_D0)
2398				continue;
2399			/*
2400			 * If the device is in D3cold it should not be
2401			 * polled either.
2402			 */
2403			if (pme_dev->dev->current_state == PCI_D3cold)
2404				continue;
2405
2406			pci_pme_wakeup(pme_dev->dev, NULL);
2407		} else {
2408			list_del(&pme_dev->list);
2409			kfree(pme_dev);
2410		}
2411	}
2412	if (!list_empty(&pci_pme_list))
2413		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2414				   msecs_to_jiffies(PME_TIMEOUT));
2415	mutex_unlock(&pci_pme_list_mutex);
2416}
2417
2418static void __pci_pme_active(struct pci_dev *dev, bool enable)
2419{
2420	u16 pmcsr;
2421
2422	if (!dev->pme_support)
2423		return;
2424
2425	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2426	/* Clear PME_Status by writing 1 to it and enable PME# */
2427	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2428	if (!enable)
2429		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2430
2431	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2432}
2433
2434/**
2435 * pci_pme_restore - Restore PME configuration after config space restore.
2436 * @dev: PCI device to update.
2437 */
2438void pci_pme_restore(struct pci_dev *dev)
2439{
2440	u16 pmcsr;
2441
2442	if (!dev->pme_support)
2443		return;
2444
2445	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2446	if (dev->wakeup_prepared) {
2447		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2448		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2449	} else {
2450		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2451		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2452	}
2453	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2454}
2455
2456/**
2457 * pci_pme_active - enable or disable PCI device's PME# function
2458 * @dev: PCI device to handle.
2459 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2460 *
2461 * The caller must verify that the device is capable of generating PME# before
2462 * calling this function with @enable equal to 'true'.
2463 */
2464void pci_pme_active(struct pci_dev *dev, bool enable)
2465{
2466	__pci_pme_active(dev, enable);
2467
2468	/*
2469	 * PCI (as opposed to PCIe) PME requires that the device have
2470	 * its PME# line hooked up correctly. Not all hardware vendors
2471	 * do this, so the PME never gets delivered and the device
2472	 * remains asleep. The easiest way around this is to
2473	 * periodically walk the list of suspended devices and check
2474	 * whether any have their PME flag set. The assumption is that
2475	 * we'll wake up often enough anyway that this won't be a huge
2476	 * hit, and the power savings from the devices will still be a
2477	 * win.
2478	 *
2479	 * Although PCIe uses an in-band PME message instead of the PME#
2480	 * line to report PME, PME does not work for some PCIe devices in
2481	 * reality.  For example, there are devices that set their PME
2482	 * status bits, but don't really bother to send a PME message;
2483	 * there are PCI Express Root Ports that don't bother to
2484	 * trigger interrupts when they receive PME messages from the
2485	 * devices below.  So PME poll is used for PCIe devices too.
2486	 */
2487
2488	if (dev->pme_poll) {
2489		struct pci_pme_device *pme_dev;
2490		if (enable) {
2491			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2492					  GFP_KERNEL);
2493			if (!pme_dev) {
2494				pci_warn(dev, "can't enable PME#\n");
2495				return;
2496			}
2497			pme_dev->dev = dev;
2498			mutex_lock(&pci_pme_list_mutex);
2499			list_add(&pme_dev->list, &pci_pme_list);
2500			if (list_is_singular(&pci_pme_list))
2501				queue_delayed_work(system_freezable_wq,
2502						   &pci_pme_work,
2503						   msecs_to_jiffies(PME_TIMEOUT));
2504			mutex_unlock(&pci_pme_list_mutex);
2505		} else {
2506			mutex_lock(&pci_pme_list_mutex);
2507			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2508				if (pme_dev->dev == dev) {
2509					list_del(&pme_dev->list);
2510					kfree(pme_dev);
2511					break;
2512				}
2513			}
2514			mutex_unlock(&pci_pme_list_mutex);
2515		}
2516	}
2517
2518	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2519}
2520EXPORT_SYMBOL(pci_pme_active);
2521
2522/**
2523 * __pci_enable_wake - enable PCI device as wakeup event source
2524 * @dev: PCI device affected
2525 * @state: PCI state from which device will issue wakeup events
2526 * @enable: True to enable event generation; false to disable
2527 *
2528 * This enables the device as a wakeup event source, or disables it.
2529 * When such events involve platform-specific hooks, those hooks are
2530 * called automatically by this routine.
2531 *
2532 * Devices with legacy power management (no standard PCI PM capabilities)
2533 * always require such platform hooks.
2534 *
2535 * RETURN VALUE:
2536 * 0 is returned on success
2537 * -EINVAL is returned if device is not supposed to wake up the system
2538 * Error code depending on the platform is returned if both the platform and
2539 * the native mechanism fail to enable the generation of wake-up events
2540 */
2541static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2542{
2543	int ret = 0;
2544
2545	/*
2546	 * Bridges that are not power-manageable directly only signal
2547	 * wakeup on behalf of subordinate devices which is set up
2548	 * elsewhere, so skip them. However, bridges that are
2549	 * power-manageable may signal wakeup for themselves (for example,
2550	 * on a hotplug event) and they need to be covered here.
2551	 */
2552	if (!pci_power_manageable(dev))
2553		return 0;
2554
2555	/* Don't do the same thing twice in a row for one device. */
2556	if (!!enable == !!dev->wakeup_prepared)
2557		return 0;
2558
2559	/*
2560	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2561	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2562	 * enable.  To disable wake-up we call the platform first, for symmetry.
2563	 */
2564
2565	if (enable) {
2566		int error;
2567
2568		/*
2569		 * Enable PME signaling if the device can signal PME from
2570		 * D3cold regardless of whether or not it can signal PME from
2571		 * the current target state, because that will allow it to
2572		 * signal PME when the hierarchy above it goes into D3cold and
2573		 * the device itself ends up in D3cold as a result of that.
2574		 */
2575		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2576			pci_pme_active(dev, true);
2577		else
2578			ret = 1;
2579		error = platform_pci_set_wakeup(dev, true);
2580		if (ret)
2581			ret = error;
2582		if (!ret)
2583			dev->wakeup_prepared = true;
2584	} else {
2585		platform_pci_set_wakeup(dev, false);
2586		pci_pme_active(dev, false);
2587		dev->wakeup_prepared = false;
2588	}
2589
2590	return ret;
2591}
2592
2593/**
2594 * pci_enable_wake - change wakeup settings for a PCI device
2595 * @pci_dev: Target device
2596 * @state: PCI state from which device will issue wakeup events
2597 * @enable: Whether or not to enable event generation
2598 *
2599 * If @enable is set, check device_may_wakeup() for the device before calling
2600 * __pci_enable_wake() for it.
2601 */
2602int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2603{
2604	if (enable && !device_may_wakeup(&pci_dev->dev))
2605		return -EINVAL;
2606
2607	return __pci_enable_wake(pci_dev, state, enable);
2608}
2609EXPORT_SYMBOL(pci_enable_wake);
2610
2611/**
2612 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2613 * @dev: PCI device to prepare
2614 * @enable: True to enable wake-up event generation; false to disable
2615 *
2616 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2617 * and this function allows them to set that up cleanly - pci_enable_wake()
2618 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2619 * ordering constraints.
2620 *
2621 * This function only returns error code if the device is not allowed to wake
2622 * up the system from sleep or it is not capable of generating PME# from both
2623 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2624 */
2625int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2626{
2627	return pci_pme_capable(dev, PCI_D3cold) ?
2628			pci_enable_wake(dev, PCI_D3cold, enable) :
2629			pci_enable_wake(dev, PCI_D3hot, enable);
2630}
2631EXPORT_SYMBOL(pci_wake_from_d3);
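
/*
 * Illustrative sketch (not part of this file): a network-style driver
 * arming Wake-on-LAN in its suspend path.  The private structure and
 * wol_enabled flag are hypothetical.
 */
#if 0	/* example only */
struct foo_priv {
	bool wol_enabled;
};

static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct foo_priv *priv = pci_get_drvdata(pdev);

	pci_save_state(pdev);
	/* Picks D3cold or D3hot depending on what the device can PME# from */
	pci_wake_from_d3(pdev, priv->wol_enabled);
	return 0;
}
#endif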
2632
2633/**
2634 * pci_target_state - find an appropriate low power state for a given PCI dev
2635 * @dev: PCI device
2636 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2637 *
2638 * Use underlying platform code to find a supported low power state for @dev.
2639 * If the platform can't manage @dev, return the deepest state from which it
2640 * can generate wake events, based on any available PME info.
2641 */
2642static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2643{
2644	if (platform_pci_power_manageable(dev)) {
2645		/*
2646		 * Call the platform to find the target state for the device.
2647		 */
2648		pci_power_t state = platform_pci_choose_state(dev);
2649
2650		switch (state) {
2651		case PCI_POWER_ERROR:
2652		case PCI_UNKNOWN:
2653			return PCI_D3hot;
2654
2655		case PCI_D1:
2656		case PCI_D2:
2657			if (pci_no_d1d2(dev))
2658				return PCI_D3hot;
2659		}
2660
2661		return state;
2662	}
2663
2664	/*
2665	 * If the device is in D3cold even though it's not power-manageable by
2666	 * the platform, it may have been powered down by non-standard means.
2667	 * Best to let it slumber.
2668	 */
2669	if (dev->current_state == PCI_D3cold)
2670		return PCI_D3cold;
2671	else if (!dev->pm_cap)
2672		return PCI_D0;
2673
2674	if (wakeup && dev->pme_support) {
2675		pci_power_t state = PCI_D3hot;
2676
2677		/*
2678		 * Find the deepest state from which the device can generate
2679		 * PME#.
2680		 */
2681		while (state && !(dev->pme_support & (1 << state)))
2682			state--;
2683
2684		if (state)
2685			return state;
2686		else if (dev->pme_support & 1)
2687			return PCI_D0;
2688	}
2689
2690	return PCI_D3hot;
2691}
2692
2693/**
2694 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2695 *			  into a sleep state
2696 * @dev: Device to handle.
2697 *
2698 * Choose the power state appropriate for the device depending on whether
2699 * it can wake up the system and/or is power manageable by the platform
2700 * (PCI_D3hot is the default) and put the device into that state.
2701 */
2702int pci_prepare_to_sleep(struct pci_dev *dev)
2703{
2704	bool wakeup = device_may_wakeup(&dev->dev);
2705	pci_power_t target_state = pci_target_state(dev, wakeup);
2706	int error;
2707
2708	if (target_state == PCI_POWER_ERROR)
2709		return -EIO;
2710
2711	pci_enable_wake(dev, target_state, wakeup);
2712
2713	error = pci_set_power_state(dev, target_state);
2714
2715	if (error)
2716		pci_enable_wake(dev, target_state, false);
2717
2718	return error;
2719}
2720EXPORT_SYMBOL(pci_prepare_to_sleep);
2721
2722/**
2723 * pci_back_from_sleep - turn PCI device on during system-wide transition
2724 *			 into working state
2725 * @dev: Device to handle.
2726 *
2727 * Disable device's system wake-up capability and put it into D0.
2728 */
2729int pci_back_from_sleep(struct pci_dev *dev)
2730{
2731	int ret = pci_set_power_state(dev, PCI_D0);
2732
2733	if (ret)
2734		return ret;
2735
2736	pci_enable_wake(dev, PCI_D0, false);
2737	return 0;
2738}
2739EXPORT_SYMBOL(pci_back_from_sleep);
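
/*
 * Illustrative sketch (not part of this file): how a suspend/resume pair
 * can lean on these two helpers to pick the target state and arm or
 * disarm wakeup.  The callback names are hypothetical.
 */
#if 0	/* example only */
static int foo_suspend_noirq(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* chooses the state, arms wakeup */
}

static int foo_resume_noirq(struct pci_dev *pdev)
{
	int ret = pci_back_from_sleep(pdev);	/* back to D0, wakeup disarmed */

	pci_restore_state(pdev);
	return ret;
}
#endif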
2740
2741/**
2742 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2743 * @dev: PCI device being suspended.
2744 *
2745 * Prepare @dev to generate wake-up events at run time and put it into a low
2746 * power state.
2747 */
2748int pci_finish_runtime_suspend(struct pci_dev *dev)
2749{
2750	pci_power_t target_state;
2751	int error;
2752
2753	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2754	if (target_state == PCI_POWER_ERROR)
2755		return -EIO;
2756
2757	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2758
2759	error = pci_set_power_state(dev, target_state);
2760
2761	if (error)
2762		pci_enable_wake(dev, target_state, false);
2763
2764	return error;
2765}
2766
2767/**
2768 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2769 * @dev: Device to check.
2770 *
2771 * Return true if the device itself is capable of generating wake-up events
2772 * (through the platform or using the native PCIe PME) or if the device supports
2773 * PME and one of its upstream bridges can generate wake-up events.
2774 */
2775bool pci_dev_run_wake(struct pci_dev *dev)
2776{
2777	struct pci_bus *bus = dev->bus;
2778
2779	if (!dev->pme_support)
2780		return false;
2781
2782	/* PME-capable in principle, but not from the target power state */
2783	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2784		return false;
2785
2786	if (device_can_wakeup(&dev->dev))
2787		return true;
2788
2789	while (bus->parent) {
2790		struct pci_dev *bridge = bus->self;
2791
2792		if (device_can_wakeup(&bridge->dev))
2793			return true;
2794
2795		bus = bus->parent;
2796	}
2797
2798	/* We have reached the root bus. */
2799	if (bus->bridge)
2800		return device_can_wakeup(bus->bridge);
2801
2802	return false;
2803}
2804EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2805
2806/**
2807 * pci_dev_need_resume - Check if it is necessary to resume the device.
2808 * @pci_dev: Device to check.
2809 *
2810 * Return 'true' if the device is not runtime-suspended or it has to be
2811 * reconfigured due to wakeup settings difference between system and runtime
2812 * suspend, or its current power state is not suitable for the upcoming
2813 * (system-wide) transition.
2814 */
2815bool pci_dev_need_resume(struct pci_dev *pci_dev)
2816{
2817	struct device *dev = &pci_dev->dev;
2818	pci_power_t target_state;
2819
2820	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2821		return true;
2822
2823	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2824
2825	/*
2826	 * If the earlier platform check has not triggered, D3cold is just power
2827	 * removal on top of D3hot, so no need to resume the device in that
2828	 * case.
2829	 */
2830	return target_state != pci_dev->current_state &&
2831		target_state != PCI_D3cold &&
2832		pci_dev->current_state != PCI_D3hot;
2833}
2834
2835/**
2836 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2837 * @pci_dev: Device to check.
2838 *
2839 * If the device is suspended and it is not configured for system wakeup,
2840 * disable PME for it to prevent it from waking up the system unnecessarily.
2841 *
2842 * Note that if the device's power state is D3cold and the platform check in
2843 * pci_dev_need_resume() has not triggered, the device's configuration need not
2844 * be changed.
2845 */
2846void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2847{
2848	struct device *dev = &pci_dev->dev;
2849
2850	spin_lock_irq(&dev->power.lock);
2851
2852	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2853	    pci_dev->current_state < PCI_D3cold)
2854		__pci_pme_active(pci_dev, false);
2855
2856	spin_unlock_irq(&dev->power.lock);
2857}
2858
2859/**
2860 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2861 * @pci_dev: Device to handle.
2862 *
2863 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2864 * it might have been disabled during the prepare phase of system suspend if
2865 * the device was not configured for system wakeup.
2866 */
2867void pci_dev_complete_resume(struct pci_dev *pci_dev)
2868{
2869	struct device *dev = &pci_dev->dev;
2870
2871	if (!pci_dev_run_wake(pci_dev))
2872		return;
2873
2874	spin_lock_irq(&dev->power.lock);
2875
2876	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2877		__pci_pme_active(pci_dev, true);
2878
2879	spin_unlock_irq(&dev->power.lock);
2880}
2881
2882/**
2883 * pci_choose_state - Choose the power state of a PCI device.
2884 * @dev: Target PCI device.
2885 * @state: Target state for the whole system.
2886 *
2887 * Returns PCI power state suitable for @dev and @state.
2888 */
2889pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
2890{
2891	if (state.event == PM_EVENT_ON)
2892		return PCI_D0;
2893
2894	return pci_target_state(dev, false);
2895}
2896EXPORT_SYMBOL(pci_choose_state);
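
/*
 * Illustrative sketch (not part of this file): the legacy .suspend
 * pattern that pairs pci_choose_state() with pci_set_power_state().
 * The driver callback is hypothetical.
 */
#if 0	/* example only */
static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
}
#endif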
2897
2898void pci_config_pm_runtime_get(struct pci_dev *pdev)
2899{
2900	struct device *dev = &pdev->dev;
2901	struct device *parent = dev->parent;
2902
2903	if (parent)
2904		pm_runtime_get_sync(parent);
2905	pm_runtime_get_noresume(dev);
2906	/*
2907	 * pdev->current_state is set to PCI_D3cold while the device is
2908	 * suspending, so wait until the suspend completes.
2909	 */
2910	pm_runtime_barrier(dev);
2911	/*
2912	 * Only need to resume devices in D3cold, because config
2913	 * registers are still accessible for devices suspended but
2914	 * not in D3cold.
2915	 */
2916	if (pdev->current_state == PCI_D3cold)
2917		pm_runtime_resume(dev);
2918}
2919
2920void pci_config_pm_runtime_put(struct pci_dev *pdev)
2921{
2922	struct device *dev = &pdev->dev;
2923	struct device *parent = dev->parent;
2924
2925	pm_runtime_put(dev);
2926	if (parent)
2927		pm_runtime_put_sync(parent);
2928}
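
/*
 * Illustrative sketch (not part of this file): these two helpers bracket
 * config space accesses so a device in D3cold is resumed first.  The
 * register offset below is a hypothetical vendor-specific location.
 */
#if 0	/* example only */
static u32 foo_read_vendor_reg(struct pci_dev *pdev)
{
	u32 val;

	pci_config_pm_runtime_get(pdev);
	pci_read_config_dword(pdev, 0x40, &val);	/* assumed offset */
	pci_config_pm_runtime_put(pdev);
	return val;
}
#endif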
2929
2930static const struct dmi_system_id bridge_d3_blacklist[] = {
2931#ifdef CONFIG_X86
2932	{
2933		/*
2934		 * Gigabyte X299 root port is not marked as hotplug capable
2935		 * which allows Linux to power manage it.  However, this
2936		 * confuses the BIOS SMI handler so don't power manage root
2937		 * ports on that system.
2938		 */
2939		.ident = "X299 DESIGNARE EX-CF",
2940		.matches = {
2941			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2942			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2943		},
2944	},
2945	{
2946		/*
2947		 * Downstream device is not accessible after putting a root port
2948		 * into D3cold and back into D0 on Elo i2.
2949		 */
2950		.ident = "Elo i2",
2951		.matches = {
2952			DMI_MATCH(DMI_SYS_VENDOR, "Elo Touch Solutions"),
2953			DMI_MATCH(DMI_PRODUCT_NAME, "Elo i2"),
2954			DMI_MATCH(DMI_PRODUCT_VERSION, "RevB"),
2955		},
2956	},
2957#endif
2958	{ }
2959};
2960
2961/**
2962 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2963 * @bridge: Bridge to check
2964 *
2965 * This function checks if it is possible to move the bridge to D3.
2966 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2967 */
2968bool pci_bridge_d3_possible(struct pci_dev *bridge)
2969{
2970	if (!pci_is_pcie(bridge))
2971		return false;
2972
2973	switch (pci_pcie_type(bridge)) {
2974	case PCI_EXP_TYPE_ROOT_PORT:
2975	case PCI_EXP_TYPE_UPSTREAM:
2976	case PCI_EXP_TYPE_DOWNSTREAM:
2977		if (pci_bridge_d3_disable)
2978			return false;
2979
2980		/*
2981		 * Hotplug ports handled by firmware in System Management Mode
2982		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2983		 */
2984		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2985			return false;
2986
2987		if (pci_bridge_d3_force)
2988			return true;
2989
2990		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2991		if (bridge->is_thunderbolt)
2992			return true;
2993
2994		/* Platform might know better if the bridge supports D3 */
2995		if (platform_pci_bridge_d3(bridge))
2996			return true;
2997
2998		/*
2999		 * Hotplug ports handled natively by the OS were not validated
3000		 * by vendors for runtime D3 at least until 2018 because there
3001		 * was no OS support.
3002		 */
3003		if (bridge->is_hotplug_bridge)
3004			return false;
3005
3006		if (dmi_check_system(bridge_d3_blacklist))
3007			return false;
3008
3009		/*
3010		 * It should be safe to put PCIe ports from 2015 or newer
3011		 * to D3.
3012		 */
3013		if (dmi_get_bios_year() >= 2015)
3014			return true;
3015		break;
3016	}
3017
3018	return false;
3019}
3020
3021static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
3022{
3023	bool *d3cold_ok = data;
3024
3025	if (/* The device needs to be allowed to go D3cold ... */
3026	    dev->no_d3cold || !dev->d3cold_allowed ||
3027
3028	    /* ... and if it is wakeup capable to do so from D3cold. */
3029	    (device_may_wakeup(&dev->dev) &&
3030	     !pci_pme_capable(dev, PCI_D3cold)) ||
3031
3032	    /* If it is a bridge it must be allowed to go to D3. */
3033	    !pci_power_manageable(dev))
3034
3035		*d3cold_ok = false;
3036
3037	return !*d3cold_ok;
3038}
3039
3040/*
3041 * pci_bridge_d3_update - Update bridge D3 capabilities
3042 * @dev: PCI device which is changed
3043 *
3044 * Update upstream bridge PM capabilities accordingly depending on if the
3045 * device PM configuration was changed or the device is being removed.  The
3046 * change is also propagated upstream.
3047 */
3048void pci_bridge_d3_update(struct pci_dev *dev)
3049{
3050	bool remove = !device_is_registered(&dev->dev);
3051	struct pci_dev *bridge;
3052	bool d3cold_ok = true;
3053
3054	bridge = pci_upstream_bridge(dev);
3055	if (!bridge || !pci_bridge_d3_possible(bridge))
3056		return;
3057
3058	/*
3059	 * If D3 is currently allowed for the bridge, removing one of its
3060	 * children won't change that.
3061	 */
3062	if (remove && bridge->bridge_d3)
3063		return;
3064
3065	/*
3066	 * If D3 is currently allowed for the bridge and a child is added or
3067	 * changed, disallowance of D3 can only be caused by that child, so
3068	 * we only need to check that single device, not any of its siblings.
3069	 *
3070	 * If D3 is currently not allowed for the bridge, checking the device
3071	 * first may allow us to skip checking its siblings.
3072	 */
3073	if (!remove)
3074		pci_dev_check_d3cold(dev, &d3cold_ok);
3075
3076	/*
3077	 * If D3 is currently not allowed for the bridge, this may be caused
3078	 * either by the device being changed/removed or any of its siblings,
3079	 * so we need to go through all children to find out if one of them
3080	 * continues to block D3.
3081	 */
3082	if (d3cold_ok && !bridge->bridge_d3)
3083		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3084			     &d3cold_ok);
3085
3086	if (bridge->bridge_d3 != d3cold_ok) {
3087		bridge->bridge_d3 = d3cold_ok;
3088		/* Propagate change to upstream bridges */
3089		pci_bridge_d3_update(bridge);
3090	}
3091}
3092
3093/**
3094 * pci_d3cold_enable - Enable D3cold for device
3095 * @dev: PCI device to handle
3096 *
3097 * This function can be used in drivers to enable D3cold from the device
3098 * they handle.  It also updates upstream PCI bridge PM capabilities
3099 * accordingly.
3100 */
3101void pci_d3cold_enable(struct pci_dev *dev)
3102{
3103	if (dev->no_d3cold) {
3104		dev->no_d3cold = false;
3105		pci_bridge_d3_update(dev);
3106	}
3107}
3108EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3109
3110/**
3111 * pci_d3cold_disable - Disable D3cold for device
3112 * @dev: PCI device to handle
3113 *
3114 * This function can be used in drivers to disable D3cold from the device
3115 * they handle.  It also updates upstream PCI bridge PM capabilities
3116 * accordingly.
3117 */
3118void pci_d3cold_disable(struct pci_dev *dev)
3119{
3120	if (!dev->no_d3cold) {
3121		dev->no_d3cold = true;
3122		pci_bridge_d3_update(dev);
3123	}
3124}
3125EXPORT_SYMBOL_GPL(pci_d3cold_disable);
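
/*
 * Illustrative sketch (not part of this file): a driver that knows its
 * device loses state it cannot recover in D3cold can opt out at probe
 * time.  The probe function and the quirk check are hypothetical.
 */
#if 0	/* example only */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (foo_loses_context_in_d3cold(pdev))	/* hypothetical check */
		pci_d3cold_disable(pdev);

	return pcim_enable_device(pdev);
}
#endif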
3126
3127/**
3128 * pci_pm_init - Initialize PM functions of given PCI device
3129 * @dev: PCI device to handle.
3130 */
3131void pci_pm_init(struct pci_dev *dev)
3132{
3133	int pm;
3134	u16 status;
3135	u16 pmc;
3136
3137	pm_runtime_forbid(&dev->dev);
3138	pm_runtime_set_active(&dev->dev);
3139	pm_runtime_enable(&dev->dev);
3140	device_enable_async_suspend(&dev->dev);
3141	dev->wakeup_prepared = false;
3142
3143	dev->pm_cap = 0;
3144	dev->pme_support = 0;
3145
3146	/* find PCI PM capability in list */
3147	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3148	if (!pm)
3149		return;
3150	/* Check device's ability to generate PME# */
3151	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3152
3153	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3154		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3155			pmc & PCI_PM_CAP_VER_MASK);
3156		return;
3157	}
3158
3159	dev->pm_cap = pm;
3160	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3161	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3162	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3163	dev->d3cold_allowed = true;
3164
3165	dev->d1_support = false;
3166	dev->d2_support = false;
3167	if (!pci_no_d1d2(dev)) {
3168		if (pmc & PCI_PM_CAP_D1)
3169			dev->d1_support = true;
3170		if (pmc & PCI_PM_CAP_D2)
3171			dev->d2_support = true;
3172
3173		if (dev->d1_support || dev->d2_support)
3174			pci_info(dev, "supports%s%s\n",
3175				   dev->d1_support ? " D1" : "",
3176				   dev->d2_support ? " D2" : "");
3177	}
3178
3179	pmc &= PCI_PM_CAP_PME_MASK;
3180	if (pmc) {
3181		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3182			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3183			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3184			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3185			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3186			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3187		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3188		dev->pme_poll = true;
3189		/*
3190		 * Make device's PM flags reflect the wake-up capability, but
3191		 * let the user space enable it to wake up the system as needed.
3192		 */
3193		device_set_wakeup_capable(&dev->dev, true);
3194		/* Disable the PME# generation functionality */
3195		pci_pme_active(dev, false);
3196	}
3197
3198	pci_read_config_word(dev, PCI_STATUS, &status);
3199	if (status & PCI_STATUS_IMM_READY)
3200		dev->imm_ready = 1;
3201}
3202
3203static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3204{
3205	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3206
3207	switch (prop) {
3208	case PCI_EA_P_MEM:
3209	case PCI_EA_P_VF_MEM:
3210		flags |= IORESOURCE_MEM;
3211		break;
3212	case PCI_EA_P_MEM_PREFETCH:
3213	case PCI_EA_P_VF_MEM_PREFETCH:
3214		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3215		break;
3216	case PCI_EA_P_IO:
3217		flags |= IORESOURCE_IO;
3218		break;
3219	default:
3220		return 0;
3221	}
3222
3223	return flags;
3224}
3225
3226static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3227					    u8 prop)
3228{
3229	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3230		return &dev->resource[bei];
3231#ifdef CONFIG_PCI_IOV
3232	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3233		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3234		return &dev->resource[PCI_IOV_RESOURCES +
3235				      bei - PCI_EA_BEI_VF_BAR0];
3236#endif
3237	else if (bei == PCI_EA_BEI_ROM)
3238		return &dev->resource[PCI_ROM_RESOURCE];
3239	else
3240		return NULL;
3241}
3242
3243/* Read an Enhanced Allocation (EA) entry */
3244static int pci_ea_read(struct pci_dev *dev, int offset)
3245{
3246	struct resource *res;
3247	int ent_size, ent_offset = offset;
3248	resource_size_t start, end;
3249	unsigned long flags;
3250	u32 dw0, bei, base, max_offset;
3251	u8 prop;
3252	bool support_64 = (sizeof(resource_size_t) >= 8);
3253
3254	pci_read_config_dword(dev, ent_offset, &dw0);
3255	ent_offset += 4;
3256
3257	/* Entry size field indicates DWORDs after 1st */
3258	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3259
3260	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3261		goto out;
3262
3263	bei = (dw0 & PCI_EA_BEI) >> 4;
3264	prop = (dw0 & PCI_EA_PP) >> 8;
3265
3266	/*
3267	 * If the Property is in the reserved range, try the Secondary
3268	 * Property instead.
3269	 */
3270	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3271		prop = (dw0 & PCI_EA_SP) >> 16;
3272	if (prop > PCI_EA_P_BRIDGE_IO)
3273		goto out;
3274
3275	res = pci_ea_get_resource(dev, bei, prop);
3276	if (!res) {
3277		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3278		goto out;
3279	}
3280
3281	flags = pci_ea_flags(dev, prop);
3282	if (!flags) {
3283		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3284		goto out;
3285	}
3286
3287	/* Read Base */
3288	pci_read_config_dword(dev, ent_offset, &base);
3289	start = (base & PCI_EA_FIELD_MASK);
3290	ent_offset += 4;
3291
3292	/* Read MaxOffset */
3293	pci_read_config_dword(dev, ent_offset, &max_offset);
3294	ent_offset += 4;
3295
3296	/* Read Base MSBs (if 64-bit entry) */
3297	if (base & PCI_EA_IS_64) {
3298		u32 base_upper;
3299
3300		pci_read_config_dword(dev, ent_offset, &base_upper);
3301		ent_offset += 4;
3302
3303		flags |= IORESOURCE_MEM_64;
3304
3305		/* entry starts above 32-bit boundary, can't use */
3306		if (!support_64 && base_upper)
3307			goto out;
3308
3309		if (support_64)
3310			start |= ((u64)base_upper << 32);
3311	}
3312
3313	end = start + (max_offset | 0x03);
3314
3315	/* Read MaxOffset MSBs (if 64-bit entry) */
3316	if (max_offset & PCI_EA_IS_64) {
3317		u32 max_offset_upper;
3318
3319		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3320		ent_offset += 4;
3321
3322		flags |= IORESOURCE_MEM_64;
3323
3324		/* entry too big, can't use */
3325		if (!support_64 && max_offset_upper)
3326			goto out;
3327
3328		if (support_64)
3329			end += ((u64)max_offset_upper << 32);
3330	}
3331
3332	if (end < start) {
3333		pci_err(dev, "EA Entry crosses address boundary\n");
3334		goto out;
3335	}
3336
3337	if (ent_size != ent_offset - offset) {
3338		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3339			ent_size, ent_offset - offset);
3340		goto out;
3341	}
3342
3343	res->name = pci_name(dev);
3344	res->start = start;
3345	res->end = end;
3346	res->flags = flags;
3347
3348	if (bei <= PCI_EA_BEI_BAR5)
3349		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3350			   bei, res, prop);
3351	else if (bei == PCI_EA_BEI_ROM)
3352		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3353			   res, prop);
3354	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3355		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3356			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
3357	else
3358		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3359			   bei, res, prop);
3360
3361out:
3362	return offset + ent_size;
3363}
3364
3365/* Enhanced Allocation Initialization */
3366void pci_ea_init(struct pci_dev *dev)
3367{
3368	int ea;
3369	u8 num_ent;
3370	int offset;
3371	int i;
3372
3373	/* find PCI EA capability in list */
3374	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3375	if (!ea)
3376		return;
3377
3378	/* determine the number of entries */
3379	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3380					&num_ent);
3381	num_ent &= PCI_EA_NUM_ENT_MASK;
3382
3383	offset = ea + PCI_EA_FIRST_ENT;
3384
3385	/* Skip DWORD 2 for type 1 functions */
3386	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3387		offset += 4;
3388
3389	/* parse each EA entry */
3390	for (i = 0; i < num_ent; ++i)
3391		offset = pci_ea_read(dev, offset);
3392}
3393
3394static void pci_add_saved_cap(struct pci_dev *pci_dev,
3395	struct pci_cap_saved_state *new_cap)
3396{
3397	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3398}
3399
3400/**
3401 * _pci_add_cap_save_buffer - allocate buffer for saving given
3402 *			      capability registers
3403 * @dev: the PCI device
3404 * @cap: the capability to allocate the buffer for
3405 * @extended: Standard or Extended capability ID
3406 * @size: requested size of the buffer
3407 */
3408static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3409				    bool extended, unsigned int size)
3410{
3411	int pos;
3412	struct pci_cap_saved_state *save_state;
3413
3414	if (extended)
3415		pos = pci_find_ext_capability(dev, cap);
3416	else
3417		pos = pci_find_capability(dev, cap);
3418
3419	if (!pos)
3420		return 0;
3421
3422	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3423	if (!save_state)
3424		return -ENOMEM;
3425
3426	save_state->cap.cap_nr = cap;
3427	save_state->cap.cap_extended = extended;
3428	save_state->cap.size = size;
3429	pci_add_saved_cap(dev, save_state);
3430
3431	return 0;
3432}
3433
3434int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3435{
3436	return _pci_add_cap_save_buffer(dev, cap, false, size);
3437}
3438
3439int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3440{
3441	return _pci_add_cap_save_buffer(dev, cap, true, size);
3442}
3443
3444/**
3445 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3446 * @dev: the PCI device
3447 */
3448void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3449{
3450	int error;
3451
3452	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3453					PCI_EXP_SAVE_REGS * sizeof(u16));
3454	if (error)
3455		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3456
3457	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3458	if (error)
3459		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3460
3461	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3462					    2 * sizeof(u16));
3463	if (error)
3464		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3465
3466	pci_allocate_vc_save_buffers(dev);
3467}
3468
3469void pci_free_cap_save_buffers(struct pci_dev *dev)
3470{
3471	struct pci_cap_saved_state *tmp;
3472	struct hlist_node *n;
3473
3474	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3475		kfree(tmp);
3476}
3477
3478/**
3479 * pci_configure_ari - enable or disable ARI forwarding
3480 * @dev: the PCI device
3481 *
3482 * If @dev and its upstream bridge both support ARI, enable ARI in the
3483 * bridge.  Otherwise, disable ARI in the bridge.
3484 */
3485void pci_configure_ari(struct pci_dev *dev)
3486{
3487	u32 cap;
3488	struct pci_dev *bridge;
3489
3490	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3491		return;
3492
3493	bridge = dev->bus->self;
3494	if (!bridge)
3495		return;
3496
3497	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3498	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3499		return;
3500
3501	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3502		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3503					 PCI_EXP_DEVCTL2_ARI);
3504		bridge->ari_enabled = 1;
3505	} else {
3506		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3507					   PCI_EXP_DEVCTL2_ARI);
3508		bridge->ari_enabled = 0;
3509	}
3510}
3511
3512static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3513{
3514	int pos;
3515	u16 cap, ctrl;
3516
3517	pos = pdev->acs_cap;
3518	if (!pos)
3519		return false;
3520
3521	/*
3522	 * Except for egress control, capabilities are either required
3523	 * or only required if controllable.  Features missing from the
3524	 * capability field can therefore be assumed to be hard-wired enabled.
3525	 */
3526	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3527	acs_flags &= (cap | PCI_ACS_EC);
3528
3529	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3530	return (ctrl & acs_flags) == acs_flags;
3531}
3532
3533/**
3534 * pci_acs_enabled - test ACS against required flags for a given device
3535 * @pdev: device to test
3536 * @acs_flags: required PCI ACS flags
3537 *
3538 * Return true if the device supports the provided flags.  Automatically
3539 * filters out flags that are not implemented on multifunction devices.
3540 *
3541 * Note that this interface checks the effective ACS capabilities of the
3542 * device rather than the actual capabilities.  For instance, most single
3543 * function endpoints are not required to support ACS because they have no
3544 * opportunity for peer-to-peer access.  We therefore return 'true'
3545 * regardless of whether the device exposes an ACS capability.  This makes
3546 * it much easier for callers of this function to ignore the actual type
3547 * or topology of the device when testing ACS support.
3548 */
3549bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3550{
3551	int ret;
3552
3553	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3554	if (ret >= 0)
3555		return ret > 0;
3556
3557	/*
3558	 * Conventional PCI and PCI-X devices never support ACS, either
3559	 * effectively or actually.  The shared bus topology implies that
3560	 * any device on the bus can receive or snoop DMA.
3561	 */
3562	if (!pci_is_pcie(pdev))
3563		return false;
3564
3565	switch (pci_pcie_type(pdev)) {
3566	/*
3567	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3568	 * but since their primary interface is PCI/X, we conservatively
3569	 * handle them as we would a non-PCIe device.
3570	 */
3571	case PCI_EXP_TYPE_PCIE_BRIDGE:
3572	/*
3573	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3574	 * applicable... must never implement an ACS Extended Capability...".
3575	 * This seems arbitrary, but we take a conservative interpretation
3576	 * of this statement.
3577	 */
3578	case PCI_EXP_TYPE_PCI_BRIDGE:
3579	case PCI_EXP_TYPE_RC_EC:
3580		return false;
3581	/*
3582	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3583	 * implement ACS in order to indicate their peer-to-peer capabilities,
3584	 * regardless of whether they are single- or multi-function devices.
3585	 */
3586	case PCI_EXP_TYPE_DOWNSTREAM:
3587	case PCI_EXP_TYPE_ROOT_PORT:
3588		return pci_acs_flags_enabled(pdev, acs_flags);
3589	/*
3590	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3591	 * implemented by the remaining PCIe types to indicate peer-to-peer
3592	 * capabilities, but only when they are part of a multifunction
3593	 * device.  The footnote for section 6.12 indicates the specific
3594	 * PCIe types included here.
3595	 */
3596	case PCI_EXP_TYPE_ENDPOINT:
3597	case PCI_EXP_TYPE_UPSTREAM:
3598	case PCI_EXP_TYPE_LEG_END:
3599	case PCI_EXP_TYPE_RC_END:
3600		if (!pdev->multifunction)
3601			break;
3602
3603		return pci_acs_flags_enabled(pdev, acs_flags);
3604	}
3605
3606	/*
3607	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3608	 * to single function devices with the exception of downstream ports.
3609	 */
3610	return true;
3611}
3612
3613/**
3614 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3615 * @start: starting downstream device
3616 * @end: ending upstream device or NULL to search to the root bus
3617 * @acs_flags: required flags
3618 *
3619 * Walk up a device tree from start to end testing PCI ACS support.  If
3620 * any step along the way does not support the required flags, return false.
3621 */
3622bool pci_acs_path_enabled(struct pci_dev *start,
3623			  struct pci_dev *end, u16 acs_flags)
3624{
3625	struct pci_dev *pdev, *parent = start;
3626
3627	do {
3628		pdev = parent;
3629
3630		if (!pci_acs_enabled(pdev, acs_flags))
3631			return false;
3632
3633		if (pci_is_root_bus(pdev->bus))
3634			return (end == NULL);
3635
3636		parent = pdev->bus->self;
3637	} while (pdev != end);
3638
3639	return true;
3640}
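/*
 * Usage sketch (illustrative only): a caller such as IOMMU grouping code
 * might check that peer-to-peer redirection is controlled on every bridge
 * between a device and the root bus.  "pdev" is assumed to be the caller's
 * struct pci_dev and the flag selection is just an example:
 *
 *	u16 acs_flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *	bool isolated = pci_acs_path_enabled(pdev, NULL, acs_flags);
 */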
3641
3642/**
3643 * pci_acs_init - Initialize ACS if hardware supports it
3644 * @dev: the PCI device
3645 */
3646void pci_acs_init(struct pci_dev *dev)
3647{
3648	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3649
3650	/*
3651	 * Attempt to enable ACS regardless of capability because some Root
3652	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3653	 * the standard ACS capability but still support ACS via those
3654	 * quirks.
3655	 */
3656	pci_enable_acs(dev);
3657}
3658
3659/**
3660 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3661 * @pdev: PCI device
3662 * @bar: BAR to find
3663 *
3664 * Helper to find the position of the ctrl register for a BAR.
3665 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3666 * Returns -ENOENT if no ctrl register for the BAR could be found.
3667 */
3668static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3669{
3670	unsigned int pos, nbars, i;
3671	u32 ctrl;
3672
3673	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3674	if (!pos)
3675		return -ENOTSUPP;
3676
3677	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3678	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3679		    PCI_REBAR_CTRL_NBAR_SHIFT;
3680
3681	for (i = 0; i < nbars; i++, pos += 8) {
3682		int bar_idx;
3683
3684		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3685		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3686		if (bar_idx == bar)
3687			return pos;
3688	}
3689
3690	return -ENOENT;
3691}
3692
3693/**
3694 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3695 * @pdev: PCI device
3696 * @bar: BAR to query
3697 *
3698 * Get the possible sizes of a resizable BAR as a bitmask defined in the spec
3699 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3700 */
3701u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3702{
3703	int pos;
3704	u32 cap;
3705
3706	pos = pci_rebar_find_pos(pdev, bar);
3707	if (pos < 0)
3708		return 0;
3709
3710	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3711	cap &= PCI_REBAR_CAP_SIZES;
3712
3713	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3714	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3715	    bar == 0 && cap == 0x7000)
3716		cap = 0x3f000;
3717
3718	return cap >> 4;
3719}
3720EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
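/*
 * Usage sketch (illustrative only): a driver that wants the largest
 * supported BAR size can take the highest set bit of the returned mask.
 * "pdev" and "bar" are assumed to come from the caller's context:
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);
 *	int largest = sizes ? fls(sizes) - 1 : -1;
 *
 * where "largest" then uses the size encoding described above (0=1MB).
 */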
3721
3722/**
3723 * pci_rebar_get_current_size - get the current size of a BAR
3724 * @pdev: PCI device
3725 * @bar: BAR to query
3726 *
3727 * Read the size of a BAR from the resizable BAR config.
3728 * Returns size if found or negative error code.
3729 */
3730int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3731{
3732	int pos;
3733	u32 ctrl;
3734
3735	pos = pci_rebar_find_pos(pdev, bar);
3736	if (pos < 0)
3737		return pos;
3738
3739	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3740	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3741}
3742
3743/**
3744 * pci_rebar_set_size - set a new size for a BAR
3745 * @pdev: PCI device
3746 * @bar: BAR to set size to
3747 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3748 *
3749 * Set the new size of a BAR as defined in the spec.
3750 * Returns zero if resizing was successful, error code otherwise.
3751 */
3752int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3753{
3754	int pos;
3755	u32 ctrl;
3756
3757	pos = pci_rebar_find_pos(pdev, bar);
3758	if (pos < 0)
3759		return pos;
3760
3761	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3762	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3763	ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3764	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3765	return 0;
3766}
3767
3768/**
3769 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3770 * @dev: the PCI device
3771 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3772 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3773 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3774 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3775 *
3776 * Return 0 if all upstream bridges support AtomicOp routing, egress
3777 * blocking is disabled on all upstream ports, and the root port supports
3778 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3779 * AtomicOp completion), or negative otherwise.
3780 */
3781int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3782{
3783	struct pci_bus *bus = dev->bus;
3784	struct pci_dev *bridge;
3785	u32 cap, ctl2;
3786
3787	/*
3788	 * Per PCIe r5.0, sec 9.3.5.10, the AtomicOp Requester Enable bit
3789	 * in Device Control 2 is reserved in VFs and the PF value applies
3790	 * to all associated VFs.
3791	 */
3792	if (dev->is_virtfn)
3793		return -EINVAL;
3794
3795	if (!pci_is_pcie(dev))
3796		return -EINVAL;
3797
3798	/*
3799	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3800	 * AtomicOp requesters.  For now, we only support endpoints as
3801	 * requesters and root ports as completers.  No endpoints as
3802	 * completers, and no peer-to-peer.
3803	 */
3804
3805	switch (pci_pcie_type(dev)) {
3806	case PCI_EXP_TYPE_ENDPOINT:
3807	case PCI_EXP_TYPE_LEG_END:
3808	case PCI_EXP_TYPE_RC_END:
3809		break;
3810	default:
3811		return -EINVAL;
3812	}
3813
3814	while (bus->parent) {
3815		bridge = bus->self;
3816
3817		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3818
3819		switch (pci_pcie_type(bridge)) {
3820		/* Ensure switch ports support AtomicOp routing */
3821		case PCI_EXP_TYPE_UPSTREAM:
3822		case PCI_EXP_TYPE_DOWNSTREAM:
3823			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3824				return -EINVAL;
3825			break;
3826
3827		/* Ensure root port supports all the sizes we care about */
3828		case PCI_EXP_TYPE_ROOT_PORT:
3829			if ((cap & cap_mask) != cap_mask)
3830				return -EINVAL;
3831			break;
3832		}
3833
3834		/* Ensure upstream ports don't block AtomicOps on egress */
3835		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3836			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3837						   &ctl2);
3838			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3839				return -EINVAL;
3840		}
3841
3842		bus = bus->parent;
3843	}
3844
3845	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3846				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3847	return 0;
3848}
3849EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
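/*
 * Usage sketch (illustrative only): an endpoint driver that wants to issue
 * 64-bit AtomicOps to host memory might do the following in its probe path
 * and fall back gracefully if the path to the root port does not support it:
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "PCIe AtomicOps not available\n");
 */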
3850
3851/**
3852 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3853 * @dev: the PCI device
3854 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3855 *
3856 * Perform INTx swizzling for a device behind one level of bridge.  This is
3857 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3858 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3859 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3860 * the PCI Express Base Specification, Revision 2.1).
3861 */
3862u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3863{
3864	int slot;
3865
3866	if (pci_ari_enabled(dev->bus))
3867		slot = 0;
3868	else
3869		slot = PCI_SLOT(dev->devfn);
3870
3871	return (((pin - 1) + slot) % 4) + 1;
3872}
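/*
 * Worked example (illustrative only): a device in slot 2 behind one bridge
 * using INTB (pin 2) maps to ((2 - 1) + 2) % 4 + 1 = 4, i.e. INTD on the
 * bridge's primary side.
 */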
3873
3874int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3875{
3876	u8 pin;
3877
3878	pin = dev->pin;
3879	if (!pin)
3880		return -1;
3881
3882	while (!pci_is_root_bus(dev->bus)) {
3883		pin = pci_swizzle_interrupt_pin(dev, pin);
3884		dev = dev->bus->self;
3885	}
3886	*bridge = dev;
3887	return pin;
3888}
3889
3890/**
3891 * pci_common_swizzle - swizzle INTx all the way to root bridge
3892 * @dev: the PCI device
3893 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3894 *
3895 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3896 * bridges all the way up to a PCI root bus.
3897 */
3898u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3899{
3900	u8 pin = *pinp;
3901
3902	while (!pci_is_root_bus(dev->bus)) {
3903		pin = pci_swizzle_interrupt_pin(dev, pin);
3904		dev = dev->bus->self;
3905	}
3906	*pinp = pin;
3907	return PCI_SLOT(dev->devfn);
3908}
3909EXPORT_SYMBOL_GPL(pci_common_swizzle);
3910
3911/**
3912 * pci_release_region - Release a PCI bar
3913 * @pdev: PCI device whose resources were previously reserved by
3914 *	  pci_request_region()
3915 * @bar: BAR to release
3916 *
3917 * Releases the PCI I/O and memory resources previously reserved by a
3918 * successful call to pci_request_region().  Call this function only
3919 * after all use of the PCI regions has ceased.
3920 */
3921void pci_release_region(struct pci_dev *pdev, int bar)
3922{
3923	struct pci_devres *dr;
3924
3925	if (pci_resource_len(pdev, bar) == 0)
3926		return;
3927	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3928		release_region(pci_resource_start(pdev, bar),
3929				pci_resource_len(pdev, bar));
3930	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3931		release_mem_region(pci_resource_start(pdev, bar),
3932				pci_resource_len(pdev, bar));
3933
3934	dr = find_pci_dr(pdev);
3935	if (dr)
3936		dr->region_mask &= ~(1 << bar);
3937}
3938EXPORT_SYMBOL(pci_release_region);
3939
3940/**
3941 * __pci_request_region - Reserve PCI I/O and memory resource
3942 * @pdev: PCI device whose resources are to be reserved
3943 * @bar: BAR to be reserved
3944 * @res_name: Name to be associated with resource.
3945 * @exclusive: whether the region access is exclusive or not
3946 *
3947 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3948 * being reserved by owner @res_name.  Do not access any
3949 * address inside the PCI regions unless this call returns
3950 * successfully.
3951 *
3952 * If @exclusive is set, then the region is marked so that userspace
3953 * is explicitly not allowed to map the resource via /dev/mem or
3954 * sysfs MMIO access.
3955 *
3956 * Returns 0 on success, or %EBUSY on error.  A warning
3957 * message is also printed on failure.
3958 */
3959static int __pci_request_region(struct pci_dev *pdev, int bar,
3960				const char *res_name, int exclusive)
3961{
3962	struct pci_devres *dr;
3963
3964	if (pci_resource_len(pdev, bar) == 0)
3965		return 0;
3966
3967	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3968		if (!request_region(pci_resource_start(pdev, bar),
3969			    pci_resource_len(pdev, bar), res_name))
3970			goto err_out;
3971	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3972		if (!__request_mem_region(pci_resource_start(pdev, bar),
3973					pci_resource_len(pdev, bar), res_name,
3974					exclusive))
3975			goto err_out;
3976	}
3977
3978	dr = find_pci_dr(pdev);
3979	if (dr)
3980		dr->region_mask |= 1 << bar;
3981
3982	return 0;
3983
3984err_out:
3985	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3986		 &pdev->resource[bar]);
3987	return -EBUSY;
3988}
3989
3990/**
3991 * pci_request_region - Reserve PCI I/O and memory resource
3992 * @pdev: PCI device whose resources are to be reserved
3993 * @bar: BAR to be reserved
3994 * @res_name: Name to be associated with resource
3995 *
3996 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3997 * being reserved by owner @res_name.  Do not access any
3998 * address inside the PCI regions unless this call returns
3999 * successfully.
4000 *
4001 * Returns 0 on success, or %EBUSY on error.  A warning
4002 * message is also printed on failure.
4003 */
4004int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
4005{
4006	return __pci_request_region(pdev, bar, res_name, 0);
4007}
4008EXPORT_SYMBOL(pci_request_region);
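/*
 * Usage sketch (illustrative only): a driver that only needs BAR 0 typically
 * requests it and then maps it, e.g. with pci_iomap().  "foo" is a
 * hypothetical driver name:
 *
 *	if (pci_request_region(pdev, 0, "foo"))
 *		return -EBUSY;
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (!regs) {
 *		pci_release_region(pdev, 0);
 *		return -ENOMEM;
 *	}
 */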
4009
4010/**
4011 * pci_release_selected_regions - Release selected PCI I/O and memory resources
4012 * @pdev: PCI device whose resources were previously reserved
4013 * @bars: Bitmask of BARs to be released
4014 *
4015 * Release selected PCI I/O and memory resources previously reserved.
4016 * Call this function only after all use of the PCI regions has ceased.
4017 */
4018void pci_release_selected_regions(struct pci_dev *pdev, int bars)
4019{
4020	int i;
4021
4022	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4023		if (bars & (1 << i))
4024			pci_release_region(pdev, i);
4025}
4026EXPORT_SYMBOL(pci_release_selected_regions);
4027
4028static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
4029					  const char *res_name, int excl)
4030{
4031	int i;
4032
4033	for (i = 0; i < PCI_STD_NUM_BARS; i++)
4034		if (bars & (1 << i))
4035			if (__pci_request_region(pdev, i, res_name, excl))
4036				goto err_out;
4037	return 0;
4038
4039err_out:
4040	while (--i >= 0)
4041		if (bars & (1 << i))
4042			pci_release_region(pdev, i);
4043
4044	return -EBUSY;
4045}
4046
4047
4048/**
4049 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
4050 * @pdev: PCI device whose resources are to be reserved
4051 * @bars: Bitmask of BARs to be requested
4052 * @res_name: Name to be associated with resource
4053 */
4054int pci_request_selected_regions(struct pci_dev *pdev, int bars,
4055				 const char *res_name)
4056{
4057	return __pci_request_selected_regions(pdev, bars, res_name, 0);
4058}
4059EXPORT_SYMBOL(pci_request_selected_regions);
4060
4061int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
4062					   const char *res_name)
4063{
4064	return __pci_request_selected_regions(pdev, bars, res_name,
4065			IORESOURCE_EXCLUSIVE);
4066}
4067EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
4068
4069/**
4070 * pci_release_regions - Release reserved PCI I/O and memory resources
4071 * @pdev: PCI device whose resources were previously reserved by
4072 *	  pci_request_regions()
4073 *
4074 * Releases all PCI I/O and memory resources previously reserved by a
4075 * successful call to pci_request_regions().  Call this function only
4076 * after all use of the PCI regions has ceased.
4077 */
4078
4079void pci_release_regions(struct pci_dev *pdev)
4080{
4081	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
4082}
4083EXPORT_SYMBOL(pci_release_regions);
4084
4085/**
4086 * pci_request_regions - Reserve PCI I/O and memory resources
4087 * @pdev: PCI device whose resources are to be reserved
4088 * @res_name: Name to be associated with resource.
4089 *
4090 * Mark all PCI regions associated with PCI device @pdev as
4091 * being reserved by owner @res_name.  Do not access any
4092 * address inside the PCI regions unless this call returns
4093 * successfully.
4094 *
4095 * Returns 0 on success, or %EBUSY on error.  A warning
4096 * message is also printed on failure.
4097 */
4098int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4099{
4100	return pci_request_selected_regions(pdev,
4101			((1 << PCI_STD_NUM_BARS) - 1), res_name);
4102}
4103EXPORT_SYMBOL(pci_request_regions);
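/*
 * Usage sketch (illustrative only): requests are normally paired across
 * probe and remove.  "foo" is a hypothetical driver name:
 *
 *	probe:   err = pci_request_regions(pdev, "foo");
 *	remove:  pci_release_regions(pdev);
 */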
4104
4105/**
4106 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4107 * @pdev: PCI device whose resources are to be reserved
4108 * @res_name: Name to be associated with resource.
4109 *
4110 * Mark all PCI regions associated with PCI device @pdev as being reserved
4111 * by owner @res_name.  Do not access any address inside the PCI regions
4112 * unless this call returns successfully.
4113 *
4114 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4115 * and the sysfs MMIO access will not be allowed.
4116 *
4117 * Returns 0 on success, or %EBUSY on error.  A warning message is also
4118 * printed on failure.
4119 */
4120int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4121{
4122	return pci_request_selected_regions_exclusive(pdev,
4123				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4124}
4125EXPORT_SYMBOL(pci_request_regions_exclusive);
4126
4127/*
4128 * Record the PCI IO range (expressed as CPU physical address + size).
4129 * Return a negative value if an error has occurred, zero otherwise
4130 */
4131int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4132			resource_size_t	size)
4133{
4134	int ret = 0;
4135#ifdef PCI_IOBASE
4136	struct logic_pio_hwaddr *range;
4137
4138	if (!size || addr + size < addr)
4139		return -EINVAL;
4140
4141	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4142	if (!range)
4143		return -ENOMEM;
4144
4145	range->fwnode = fwnode;
4146	range->size = size;
4147	range->hw_start = addr;
4148	range->flags = LOGIC_PIO_CPU_MMIO;
4149
4150	ret = logic_pio_register_range(range);
4151	if (ret)
4152		kfree(range);
4153
4154	/* Ignore duplicates due to deferred probing */
4155	if (ret == -EEXIST)
4156		ret = 0;
4157#endif
4158
4159	return ret;
4160}
4161
4162phys_addr_t pci_pio_to_address(unsigned long pio)
4163{
4164	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4165
4166#ifdef PCI_IOBASE
4167	if (pio >= MMIO_UPPER_LIMIT)
4168		return address;
4169
4170	address = logic_pio_to_hwaddr(pio);
4171#endif
4172
4173	return address;
4174}
4175EXPORT_SYMBOL_GPL(pci_pio_to_address);
4176
4177unsigned long __weak pci_address_to_pio(phys_addr_t address)
4178{
4179#ifdef PCI_IOBASE
4180	return logic_pio_trans_cpuaddr(address);
4181#else
4182	if (address > IO_SPACE_LIMIT)
4183		return (unsigned long)-1;
4184
4185	return (unsigned long) address;
4186#endif
4187}
4188
4189/**
4190 * pci_remap_iospace - Remap the memory mapped I/O space
4191 * @res: Resource describing the I/O space
4192 * @phys_addr: physical address of range to be mapped
4193 *
4194 * Remap the memory mapped I/O space described by the @res and the CPU
4195 * physical address @phys_addr into virtual address space.  Only
4196 * architectures that have memory mapped IO functions defined (and the
4197 * PCI_IOBASE value defined) should call this function.
4198 */
4199#ifndef pci_remap_iospace
4200int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4201{
4202#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4203	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4204
4205	if (!(res->flags & IORESOURCE_IO))
4206		return -EINVAL;
4207
4208	if (res->end > IO_SPACE_LIMIT)
4209		return -EINVAL;
4210
4211	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4212				  pgprot_device(PAGE_KERNEL));
4213#else
4214	/*
4215	 * This architecture does not have memory mapped I/O space,
4216	 * so this function should never be called
4217	 */
4218	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4219	return -ENODEV;
4220#endif
4221}
4222EXPORT_SYMBOL(pci_remap_iospace);
4223#endif
4224
4225/**
4226 * pci_unmap_iospace - Unmap the memory mapped I/O space
4227 * @res: resource to be unmapped
4228 *
4229 * Unmap the I/O range described by @res from CPU virtual address space.  Only
4230 * architectures that have memory mapped IO functions defined (and the
4231 * PCI_IOBASE value defined) should call this function.
4232 */
4233void pci_unmap_iospace(struct resource *res)
4234{
4235#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4236	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4237
4238	vunmap_range(vaddr, vaddr + resource_size(res));
4239#endif
4240}
4241EXPORT_SYMBOL(pci_unmap_iospace);
4242
4243static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4244{
4245	struct resource **res = ptr;
4246
4247	pci_unmap_iospace(*res);
4248}
4249
4250/**
4251 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4252 * @dev: Generic device to remap IO address for
4253 * @res: Resource describing the I/O space
4254 * @phys_addr: physical address of range to be mapped
4255 *
4256 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4257 * detach.
4258 */
4259int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4260			   phys_addr_t phys_addr)
4261{
4262	const struct resource **ptr;
4263	int error;
4264
4265	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4266	if (!ptr)
4267		return -ENOMEM;
4268
4269	error = pci_remap_iospace(res, phys_addr);
4270	if (error) {
4271		devres_free(ptr);
4272	} else {
4273		*ptr = res;
4274		devres_add(dev, ptr);
4275	}
4276
4277	return error;
4278}
4279EXPORT_SYMBOL(devm_pci_remap_iospace);
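/*
 * Usage sketch (illustrative only): a host bridge driver with an I/O window
 * resource "io_res" and the CPU physical address "io_phys" of that window
 * (both hypothetical names) could map it with:
 *
 *	err = devm_pci_remap_iospace(dev, &io_res, io_phys);
 *	if (err)
 *		return err;
 */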
4280
4281/**
4282 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4283 * @dev: Generic device to remap IO address for
4284 * @offset: Resource address to map
4285 * @size: Size of map
4286 *
4287 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4288 * detach.
4289 */
4290void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4291				      resource_size_t offset,
4292				      resource_size_t size)
4293{
4294	void __iomem **ptr, *addr;
4295
4296	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4297	if (!ptr)
4298		return NULL;
4299
4300	addr = pci_remap_cfgspace(offset, size);
4301	if (addr) {
4302		*ptr = addr;
4303		devres_add(dev, ptr);
4304	} else
4305		devres_free(ptr);
4306
4307	return addr;
4308}
4309EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4310
4311/**
4312 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4313 * @dev: generic device to handle the resource for
4314 * @res: configuration space resource to be handled
4315 *
4316 * Checks that a resource is a valid memory region, requests the memory
4317 * region, and ioremaps it with the pci_remap_cfgspace() API, which ensures
4318 * that the proper PCI configuration space memory attributes are used.
4319 *
4320 * All operations are managed and will be undone on driver detach.
4321 *
4322 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4323 * on failure. Usage example::
4324 *
4325 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4326 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4327 *	if (IS_ERR(base))
4328 *		return PTR_ERR(base);
4329 */
4330void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4331					  struct resource *res)
4332{
4333	resource_size_t size;
4334	const char *name;
4335	void __iomem *dest_ptr;
4336
4337	BUG_ON(!dev);
4338
4339	if (!res || resource_type(res) != IORESOURCE_MEM) {
4340		dev_err(dev, "invalid resource\n");
4341		return IOMEM_ERR_PTR(-EINVAL);
4342	}
4343
4344	size = resource_size(res);
4345
4346	if (res->name)
4347		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4348				      res->name);
4349	else
4350		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4351	if (!name)
4352		return IOMEM_ERR_PTR(-ENOMEM);
4353
4354	if (!devm_request_mem_region(dev, res->start, size, name)) {
4355		dev_err(dev, "can't request region for resource %pR\n", res);
4356		return IOMEM_ERR_PTR(-EBUSY);
4357	}
4358
4359	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4360	if (!dest_ptr) {
4361		dev_err(dev, "ioremap failed for resource %pR\n", res);
4362		devm_release_mem_region(dev, res->start, size);
4363		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4364	}
4365
4366	return dest_ptr;
4367}
4368EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4369
4370static void __pci_set_master(struct pci_dev *dev, bool enable)
4371{
4372	u16 old_cmd, cmd;
4373
4374	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4375	if (enable)
4376		cmd = old_cmd | PCI_COMMAND_MASTER;
4377	else
4378		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4379	if (cmd != old_cmd) {
4380		pci_dbg(dev, "%s bus mastering\n",
4381			enable ? "enabling" : "disabling");
4382		pci_write_config_word(dev, PCI_COMMAND, cmd);
4383	}
4384	dev->is_busmaster = enable;
4385}
4386
4387/**
4388 * pcibios_setup - process "pci=" kernel boot arguments
4389 * @str: string used to pass in "pci=" kernel boot arguments
4390 *
4391 * Process kernel boot arguments.  This is the default implementation.
4392 * Architecture specific implementations can override this as necessary.
4393 */
4394char * __weak __init pcibios_setup(char *str)
4395{
4396	return str;
4397}
4398
4399/**
4400 * pcibios_set_master - enable PCI bus-mastering for device dev
4401 * @dev: the PCI device to enable
4402 *
4403 * Enables PCI bus-mastering for the device.  This is the default
4404 * implementation.  Architecture specific implementations can override
4405 * this if necessary.
4406 */
4407void __weak pcibios_set_master(struct pci_dev *dev)
4408{
4409	u8 lat;
4410
4411	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4412	if (pci_is_pcie(dev))
4413		return;
4414
4415	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4416	if (lat < 16)
4417		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4418	else if (lat > pcibios_max_latency)
4419		lat = pcibios_max_latency;
4420	else
4421		return;
4422
4423	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4424}
4425
4426/**
4427 * pci_set_master - enables bus-mastering for device dev
4428 * @dev: the PCI device to enable
4429 *
4430 * Enables bus-mastering on the device and calls pcibios_set_master()
4431 * to do the needed arch specific settings.
4432 */
4433void pci_set_master(struct pci_dev *dev)
4434{
4435	__pci_set_master(dev, true);
4436	pcibios_set_master(dev);
4437}
4438EXPORT_SYMBOL(pci_set_master);
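/*
 * Usage sketch (illustrative only): a typical probe path enables the device
 * and bus mastering before configuring DMA.  The 64-bit DMA mask below is
 * just an example:
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	pci_set_master(pdev);
 *	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 */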
4439
4440/**
4441 * pci_clear_master - disables bus-mastering for device dev
4442 * @dev: the PCI device to disable
4443 */
4444void pci_clear_master(struct pci_dev *dev)
4445{
4446	__pci_set_master(dev, false);
4447}
4448EXPORT_SYMBOL(pci_clear_master);
4449
4450/**
4451 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4452 * @dev: the PCI device for which MWI is to be enabled
4453 *
4454 * Helper function for pci_set_mwi.
4455 * Originally copied from drivers/net/acenic.c.
4456 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4457 *
4458 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4459 */
4460int pci_set_cacheline_size(struct pci_dev *dev)
4461{
4462	u8 cacheline_size;
4463
4464	if (!pci_cache_line_size)
4465		return -EINVAL;
4466
4467	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4468	   equal to or a multiple of the right value. */
4469	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4470	if (cacheline_size >= pci_cache_line_size &&
4471	    (cacheline_size % pci_cache_line_size) == 0)
4472		return 0;
4473
4474	/* Write the correct value. */
4475	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4476	/* Read it back. */
4477	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4478	if (cacheline_size == pci_cache_line_size)
4479		return 0;
4480
4481	pci_dbg(dev, "cache line size of %d is not supported\n",
4482		   pci_cache_line_size << 2);
4483
4484	return -EINVAL;
4485}
4486EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4487
4488/**
4489 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4490 * @dev: the PCI device for which MWI is enabled
4491 *
4492 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4493 *
4494 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4495 */
4496int pci_set_mwi(struct pci_dev *dev)
4497{
4498#ifdef PCI_DISABLE_MWI
4499	return 0;
4500#else
4501	int rc;
4502	u16 cmd;
4503
4504	rc = pci_set_cacheline_size(dev);
4505	if (rc)
4506		return rc;
4507
4508	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4509	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4510		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4511		cmd |= PCI_COMMAND_INVALIDATE;
4512		pci_write_config_word(dev, PCI_COMMAND, cmd);
4513	}
4514	return 0;
4515#endif
4516}
4517EXPORT_SYMBOL(pci_set_mwi);
4518
4519/**
4520 * pcim_set_mwi - a device-managed pci_set_mwi()
4521 * @dev: the PCI device for which MWI is enabled
4522 *
4523 * Managed pci_set_mwi().
4524 *
4525 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4526 */
4527int pcim_set_mwi(struct pci_dev *dev)
4528{
4529	struct pci_devres *dr;
4530
4531	dr = find_pci_dr(dev);
4532	if (!dr)
4533		return -ENOMEM;
4534
4535	dr->mwi = 1;
4536	return pci_set_mwi(dev);
4537}
4538EXPORT_SYMBOL(pcim_set_mwi);
4539
4540/**
4541 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4542 * @dev: the PCI device for which MWI is enabled
4543 *
4544 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4545 * Callers are not required to check the return value.
4546 *
4547 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4548 */
4549int pci_try_set_mwi(struct pci_dev *dev)
4550{
4551#ifdef PCI_DISABLE_MWI
4552	return 0;
4553#else
4554	return pci_set_mwi(dev);
4555#endif
4556}
4557EXPORT_SYMBOL(pci_try_set_mwi);
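/*
 * Usage sketch (illustrative only): since MWI is a best-effort optimization
 * (and a no-op where PCI_DISABLE_MWI is defined), drivers commonly ignore
 * the return value:
 *
 *	pci_try_set_mwi(pdev);
 */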
4558
4559/**
4560 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4561 * @dev: the PCI device to disable
4562 *
4563 * Disables PCI Memory-Write-Invalidate transaction on the device
4564 */
4565void pci_clear_mwi(struct pci_dev *dev)
4566{
4567#ifndef PCI_DISABLE_MWI
4568	u16 cmd;
4569
4570	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4571	if (cmd & PCI_COMMAND_INVALIDATE) {
4572		cmd &= ~PCI_COMMAND_INVALIDATE;
4573		pci_write_config_word(dev, PCI_COMMAND, cmd);
4574	}
4575#endif
4576}
4577EXPORT_SYMBOL(pci_clear_mwi);
4578
4579/**
4580 * pci_disable_parity - disable parity checking for device
4581 * @dev: the PCI device to operate on
4582 *
4583 * Disable parity checking for device @dev
4584 */
4585void pci_disable_parity(struct pci_dev *dev)
4586{
4587	u16 cmd;
4588
4589	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4590	if (cmd & PCI_COMMAND_PARITY) {
4591		cmd &= ~PCI_COMMAND_PARITY;
4592		pci_write_config_word(dev, PCI_COMMAND, cmd);
4593	}
4594}
4595
4596/**
4597 * pci_intx - enables/disables PCI INTx for device dev
4598 * @pdev: the PCI device to operate on
4599 * @enable: boolean: whether to enable or disable PCI INTx
4600 *
4601 * Enables/disables PCI INTx for device @pdev
4602 */
4603void pci_intx(struct pci_dev *pdev, int enable)
4604{
4605	u16 pci_command, new;
4606
4607	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4608
4609	if (enable)
4610		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4611	else
4612		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4613
4614	if (new != pci_command) {
4615		struct pci_devres *dr;
4616
4617		pci_write_config_word(pdev, PCI_COMMAND, new);
4618
4619		dr = find_pci_dr(pdev);
4620		if (dr && !dr->restore_intx) {
4621			dr->restore_intx = 1;
4622			dr->orig_intx = !enable;
4623		}
4624	}
4625}
4626EXPORT_SYMBOL_GPL(pci_intx);
4627
4628static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4629{
4630	struct pci_bus *bus = dev->bus;
4631	bool mask_updated = true;
4632	u32 cmd_status_dword;
4633	u16 origcmd, newcmd;
4634	unsigned long flags;
4635	bool irq_pending;
4636
4637	/*
4638	 * We do a single dword read to retrieve both command and status.
4639	 * Document assumptions that make this possible.
4640	 */
4641	BUILD_BUG_ON(PCI_COMMAND % 4);
4642	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4643
4644	raw_spin_lock_irqsave(&pci_lock, flags);
4645
4646	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4647
4648	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4649
4650	/*
4651	 * Check interrupt status register to see whether our device
4652	 * triggered the interrupt (when masking) or the next IRQ is
4653	 * already pending (when unmasking).
4654	 */
4655	if (mask != irq_pending) {
4656		mask_updated = false;
4657		goto done;
4658	}
4659
4660	origcmd = cmd_status_dword;
4661	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4662	if (mask)
4663		newcmd |= PCI_COMMAND_INTX_DISABLE;
4664	if (newcmd != origcmd)
4665		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4666
4667done:
4668	raw_spin_unlock_irqrestore(&pci_lock, flags);
4669
4670	return mask_updated;
4671}
4672
4673/**
4674 * pci_check_and_mask_intx - mask INTx on pending interrupt
4675 * @dev: the PCI device to operate on
4676 *
4677 * Check if the device dev has its INTx line asserted, mask it and return
4678 * true in that case. False is returned if no interrupt was pending.
4679 */
4680bool pci_check_and_mask_intx(struct pci_dev *dev)
4681{
4682	return pci_check_and_set_intx_mask(dev, true);
4683}
4684EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4685
4686/**
4687 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4688 * @dev: the PCI device to operate on
4689 *
4690 * Check if the device dev has its INTx line asserted, unmask it if not and
4691 * return true. False is returned and the mask remains active if there was
4692 * still an interrupt pending.
4693 */
4694bool pci_check_and_unmask_intx(struct pci_dev *dev)
4695{
4696	return pci_check_and_set_intx_mask(dev, false);
4697}
4698EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
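/*
 * Usage sketch (illustrative only): a handler for a shared INTx line can use
 * the check-and-mask helpers to claim only interrupts that really came from
 * its device; deferred work would later re-enable the line with
 * pci_check_and_unmask_intx().  "foo_irq" is a hypothetical handler:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *		return IRQ_HANDLED;
 *	}
 */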
4699
4700/**
4701 * pci_wait_for_pending_transaction - wait for pending transaction
4702 * @dev: the PCI device to operate on
4703 *
4704 * Return 0 if the transaction is still pending, 1 otherwise.
4705 */
4706int pci_wait_for_pending_transaction(struct pci_dev *dev)
4707{
4708	if (!pci_is_pcie(dev))
4709		return 1;
4710
4711	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4712				    PCI_EXP_DEVSTA_TRPND);
4713}
4714EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4715
4716/**
4717 * pcie_flr - initiate a PCIe function level reset
4718 * @dev: device to reset
4719 *
4720 * Initiate a function level reset unconditionally on @dev, without first
4721 * checking any flags or the Device Capabilities (DEVCAP) register.
4722 */
4723int pcie_flr(struct pci_dev *dev)
4724{
4725	if (!pci_wait_for_pending_transaction(dev))
4726		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4727
4728	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4729
4730	if (dev->imm_ready)
4731		return 0;
4732
4733	/*
4734	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4735	 * 100ms, but may silently discard requests while the FLR is in
4736	 * progress.  Wait 100ms before trying to access the device.
4737	 */
4738	msleep(100);
4739
4740	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4741}
4742EXPORT_SYMBOL_GPL(pcie_flr);
4743
4744/**
4745 * pcie_reset_flr - initiate a PCIe function level reset
4746 * @dev: device to reset
4747 * @probe: if true, return 0 if device can be reset this way
4748 *
4749 * Initiate a function level reset on @dev.
4750 */
4751int pcie_reset_flr(struct pci_dev *dev, bool probe)
4752{
4753	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4754		return -ENOTTY;
4755
4756	if (!(dev->devcap & PCI_EXP_DEVCAP_FLR))
4757		return -ENOTTY;
4758
4759	if (probe)
4760		return 0;
4761
4762	return pcie_flr(dev);
4763}
4764EXPORT_SYMBOL_GPL(pcie_reset_flr);
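/*
 * Usage sketch (illustrative only): a caller that wants an FLR only when the
 * device advertises one can probe first and then perform the reset:
 *
 *	if (pcie_reset_flr(pdev, true) == 0)
 *		err = pcie_reset_flr(pdev, false);
 */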
4765
4766static int pci_af_flr(struct pci_dev *dev, bool probe)
4767{
4768	int pos;
4769	u8 cap;
4770
4771	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4772	if (!pos)
4773		return -ENOTTY;
4774
4775	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4776		return -ENOTTY;
4777
4778	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4779	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4780		return -ENOTTY;
4781
4782	if (probe)
4783		return 0;
4784
4785	/*
4786	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4787	 * is used, so we use the control offset rather than status and shift
4788	 * the test bit to match.
4789	 */
4790	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4791				 PCI_AF_STATUS_TP << 8))
4792		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4793
4794	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4795
4796	if (dev->imm_ready)
4797		return 0;
4798
4799	/*
4800	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4801	 * updated 27 July 2006; a device must complete an FLR within
4802	 * 100ms, but may silently discard requests while the FLR is in
4803	 * progress.  Wait 100ms before trying to access the device.
4804	 */
4805	msleep(100);
4806
4807	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4808}
4809
4810/**
4811 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4812 * @dev: Device to reset.
4813 * @probe: if true, return 0 if the device can be reset this way.
4814 *
4815 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4816 * unset, it will be reinitialized internally when going from PCI_D3hot to
4817 * PCI_D0.  If that's the case and the device is not in a low-power state
4818 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4819 *
4820 * NOTE: This causes the caller to sleep for twice the device power transition
4821 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4822 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4823 * Moreover, only devices in D0 can be reset by this function.
4824 */
4825static int pci_pm_reset(struct pci_dev *dev, bool probe)
4826{
4827	u16 csr;
4828
4829	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4830		return -ENOTTY;
4831
4832	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4833	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4834		return -ENOTTY;
4835
4836	if (probe)
4837		return 0;
4838
4839	if (dev->current_state != PCI_D0)
4840		return -EINVAL;
4841
4842	csr &= ~PCI_PM_CTRL_STATE_MASK;
4843	csr |= PCI_D3hot;
4844	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4845	pci_dev_d3_sleep(dev);
4846
4847	csr &= ~PCI_PM_CTRL_STATE_MASK;
4848	csr |= PCI_D0;
4849	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4850	pci_dev_d3_sleep(dev);
4851
4852	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4853}
4854
4855/**
4856 * pcie_wait_for_link_delay - Wait until link is active or inactive
4857 * @pdev: Bridge device
4858 * @active: waiting for active or inactive?
4859 * @delay: Delay to wait after link has become active (in ms)
4860 *
4861 * Use this to wait until the link becomes active or inactive.
4862 */
4863static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4864				     int delay)
4865{
4866	int timeout = 1000;
4867	bool ret;
4868	u16 lnk_status;
4869
4870	/*
4871	 * Some controllers might not implement link active reporting. In this
4872	 * case, we wait for 1000 ms + any delay requested by the caller.
4873	 */
4874	if (!pdev->link_active_reporting) {
4875		msleep(timeout + delay);
4876		return true;
4877	}
4878
4879	/*
4880	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within 20ms,
4881	 * after which we should expect the link to become active if the reset was
4882	 * successful. If so, software must wait a minimum of 100ms before sending
4883	 * configuration requests to devices downstream of this port.
4884	 *
4885	 * If the link fails to activate, either the device was physically
4886	 * removed or the link is permanently failed.
4887	 */
4888	if (active)
4889		msleep(20);
4890	for (;;) {
4891		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4892		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4893		if (ret == active)
4894			break;
4895		if (timeout <= 0)
4896			break;
4897		msleep(10);
4898		timeout -= 10;
4899	}
4900	if (active && ret)
4901		msleep(delay);
4902
4903	return ret == active;
4904}
4905
4906/**
4907 * pcie_wait_for_link - Wait until link is active or inactive
4908 * @pdev: Bridge device
4909 * @active: waiting for active or inactive?
4910 *
4911 * Use this to wait until the link becomes active or inactive.
4912 */
4913bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4914{
4915	return pcie_wait_for_link_delay(pdev, active, 100);
4916}
4917
4918/*
4919 * Find maximum D3cold delay required by all the devices on the bus.  The
4920 * spec says 100 ms, but firmware can lower it and we allow drivers to
4921 * increase it as well.
4922 *
4923 * Called with @pci_bus_sem locked for reading.
4924 */
4925static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4926{
4927	const struct pci_dev *pdev;
4928	int min_delay = 100;
4929	int max_delay = 0;
4930
4931	list_for_each_entry(pdev, &bus->devices, bus_list) {
4932		if (pdev->d3cold_delay < min_delay)
4933			min_delay = pdev->d3cold_delay;
4934		if (pdev->d3cold_delay > max_delay)
4935			max_delay = pdev->d3cold_delay;
4936	}
4937
4938	return max(min_delay, max_delay);
4939}
4940
4941/**
4942 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4943 * @dev: PCI bridge
4944 *
4945 * Handle necessary delays before access to the devices on the secondary
4946 * side of the bridge is permitted after a D3cold to D0 transition.
4947 *
4948 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4949 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4950 * 4.3.2.
4951 */
4952void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4953{
4954	struct pci_dev *child;
4955	int delay;
4956
4957	if (pci_dev_is_disconnected(dev))
4958		return;
4959
4960	if (!pci_is_bridge(dev) || !dev->bridge_d3)
4961		return;
4962
4963	down_read(&pci_bus_sem);
4964
4965	/*
4966	 * We only deal with devices that are currently present on the bus.
4967	 * For any hot-added devices the access delay is handled in pciehp
4968	 * board_added(). In case of ACPI hotplug the firmware is expected
4969	 * to configure the devices before the OS is notified.
4970	 */
4971	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4972		up_read(&pci_bus_sem);
4973		return;
4974	}
4975
4976	/* Take d3cold_delay requirements into account */
4977	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4978	if (!delay) {
4979		up_read(&pci_bus_sem);
4980		return;
4981	}
4982
4983	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4984				 bus_list);
4985	up_read(&pci_bus_sem);
4986
4987	/*
4988	 * For conventional PCI and PCI-X we need to wait Tpvrh + Trhfa before
4989	 * accessing the device after reset (that is 1000 ms + 100 ms). In
4990	 * practice this should not be needed because we don't do power
4991	 * management for them (see pci_bridge_d3_possible()).
4992	 */
4993	if (!pci_is_pcie(dev)) {
4994		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4995		msleep(1000 + delay);
4996		return;
4997	}
4998
4999	/*
5000	 * For PCIe downstream and root ports that do not support speeds
5001	 * greater than 5 GT/s, we need to wait a minimum of 100 ms. For higher
5002	 * speeds (gen3) we need to wait first for the data link layer to
5003	 * become active.
5004	 *
5005	 * However, 100 ms is the minimum and the PCIe spec says the
5006	 * software must allow at least 1s before it can determine that the
5007	 * device that did not respond is a broken device. There is
5008	 * evidence that 100 ms is not always enough; for example, a certain
5009	 * Titan Ridge xHCI controller does not always respond to
5010	 * configuration requests if we only wait for 100 ms (see
5011	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
5012	 *
5013	 * Therefore we wait for 100 ms and check for device presence.
5014	 * If it is still not present, give it an additional 100 ms.
5015	 */
5016	if (!pcie_downstream_port(dev))
5017		return;
5018
5019	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
5020		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
5021		msleep(delay);
5022	} else {
5023		pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
5024			delay);
5025		if (!pcie_wait_for_link_delay(dev, true, delay)) {
5026			/* Did not train, no need to wait any further */
5027			pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
5028			return;
5029		}
5030	}
5031
5032	if (!pci_device_is_present(child)) {
5033		pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
5034		msleep(delay);
5035	}
5036}
5037
5038void pci_reset_secondary_bus(struct pci_dev *dev)
5039{
5040	u16 ctrl;
5041
5042	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
5043	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
5044	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5045
5046	/*
5047	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
5048	 * this to 2ms to ensure that we meet the minimum requirement.
5049	 */
5050	msleep(2);
5051
5052	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
5053	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
5054
5055	/*
5056	 * Trhfa for conventional PCI is 2^25 clock cycles.
5057	 * Assuming a minimum 33MHz clock this results in a 1s
5058	 * delay before we can consider subordinate devices to
5059	 * be re-initialized.  PCIe has some ways to shorten this,
5060	 * but we don't make use of them yet.
5061	 */
5062	ssleep(1);
5063}
5064
5065void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
5066{
5067	pci_reset_secondary_bus(dev);
5068}
5069
5070/**
5071 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
5072 * @dev: Bridge device
5073 *
5074 * Use the bridge control register to assert reset on the secondary bus.
5075 * Devices on the secondary bus are left in power-on state.
5076 */
5077int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
5078{
5079	pcibios_reset_secondary_bus(dev);
5080
5081	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
5082}
5083EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
5084
5085static int pci_parent_bus_reset(struct pci_dev *dev, bool probe)
5086{
5087	struct pci_dev *pdev;
5088
5089	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5090	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5091		return -ENOTTY;
5092
5093	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5094		if (pdev != dev)
5095			return -ENOTTY;
5096
5097	if (probe)
5098		return 0;
5099
5100	return pci_bridge_secondary_bus_reset(dev->bus->self);
5101}
5102
5103static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, bool probe)
5104{
5105	int rc = -ENOTTY;
5106
5107	if (!hotplug || !try_module_get(hotplug->owner))
5108		return rc;
5109
5110	if (hotplug->ops->reset_slot)
5111		rc = hotplug->ops->reset_slot(hotplug, probe);
5112
5113	module_put(hotplug->owner);
5114
5115	return rc;
5116}
5117
5118static int pci_dev_reset_slot_function(struct pci_dev *dev, bool probe)
5119{
5120	if (dev->multifunction || dev->subordinate || !dev->slot ||
5121	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5122		return -ENOTTY;
5123
5124	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5125}
5126
5127static int pci_reset_bus_function(struct pci_dev *dev, bool probe)
5128{
5129	int rc;
5130
5131	rc = pci_dev_reset_slot_function(dev, probe);
5132	if (rc != -ENOTTY)
5133		return rc;
5134	return pci_parent_bus_reset(dev, probe);
5135}
5136
5137void pci_dev_lock(struct pci_dev *dev)
5138{
5139	/* block PM suspend, driver probe, etc. */
5140	device_lock(&dev->dev);
5141	pci_cfg_access_lock(dev);
5142}
5143EXPORT_SYMBOL_GPL(pci_dev_lock);
5144
5145/* Return 1 on successful lock, 0 on contention */
5146int pci_dev_trylock(struct pci_dev *dev)
5147{
5148	if (device_trylock(&dev->dev)) {
5149		if (pci_cfg_access_trylock(dev))
5150			return 1;
5151		device_unlock(&dev->dev);
5152	}
5153
5154	return 0;
5155}
5156EXPORT_SYMBOL_GPL(pci_dev_trylock);
5157
5158void pci_dev_unlock(struct pci_dev *dev)
5159{
5160	pci_cfg_access_unlock(dev);
5161	device_unlock(&dev->dev);
5162}
5163EXPORT_SYMBOL_GPL(pci_dev_unlock);
5164
5165static void pci_dev_save_and_disable(struct pci_dev *dev)
5166{
5167	const struct pci_error_handlers *err_handler =
5168			dev->driver ? dev->driver->err_handler : NULL;
5169
5170	/*
5171	 * dev->driver->err_handler->reset_prepare() is protected against
5172	 * races with ->remove() by the device lock, which must be held by
5173	 * the caller.
5174	 */
5175	if (err_handler && err_handler->reset_prepare)
5176		err_handler->reset_prepare(dev);
5177
5178	/*
5179	 * Wake-up device prior to save.  PM registers default to D0 after
5180	 * reset and a simple register restore doesn't reliably return
5181	 * to a non-D0 state anyway.
5182	 */
5183	pci_set_power_state(dev, PCI_D0);
5184
5185	pci_save_state(dev);
5186	/*
5187	 * Disable the device by clearing the Command register, except for
5188	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5189	 * BARs, but also prevents the device from being Bus Master, preventing
5190	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5191	 * compliant devices, INTx-disable prevents legacy interrupts.
5192	 */
5193	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5194}
5195
5196static void pci_dev_restore(struct pci_dev *dev)
5197{
5198	const struct pci_error_handlers *err_handler =
5199			dev->driver ? dev->driver->err_handler : NULL;
5200
5201	pci_restore_state(dev);
5202
5203	/*
5204	 * dev->driver->err_handler->reset_done() is protected against
5205	 * races with ->remove() by the device lock, which must be held by
5206	 * the caller.
5207	 */
5208	if (err_handler && err_handler->reset_done)
5209		err_handler->reset_done(dev);
5210}
5211
5212/* dev->reset_methods[] is a 0-terminated list of indices into this array */
5213static const struct pci_reset_fn_method pci_reset_fn_methods[] = {
5214	{ },
5215	{ pci_dev_specific_reset, .name = "device_specific" },
5216	{ pci_dev_acpi_reset, .name = "acpi" },
5217	{ pcie_reset_flr, .name = "flr" },
5218	{ pci_af_flr, .name = "af_flr" },
5219	{ pci_pm_reset, .name = "pm" },
5220	{ pci_reset_bus_function, .name = "bus" },
5221};
5222
5223static ssize_t reset_method_show(struct device *dev,
5224				 struct device_attribute *attr, char *buf)
5225{
5226	struct pci_dev *pdev = to_pci_dev(dev);
5227	ssize_t len = 0;
5228	int i, m;
5229
5230	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5231		m = pdev->reset_methods[i];
5232		if (!m)
5233			break;
5234
5235		len += sysfs_emit_at(buf, len, "%s%s", len ? " " : "",
5236				     pci_reset_fn_methods[m].name);
5237	}
5238
5239	if (len)
5240		len += sysfs_emit_at(buf, len, "\n");
5241
5242	return len;
5243}
5244
5245static int reset_method_lookup(const char *name)
5246{
5247	int m;
5248
5249	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5250		if (sysfs_streq(name, pci_reset_fn_methods[m].name))
5251			return m;
5252	}
5253
5254	return 0;	/* not found */
5255}
5256
5257static ssize_t reset_method_store(struct device *dev,
5258				  struct device_attribute *attr,
5259				  const char *buf, size_t count)
5260{
5261	struct pci_dev *pdev = to_pci_dev(dev);
5262	char *options, *name;
5263	int m, n;
5264	u8 reset_methods[PCI_NUM_RESET_METHODS] = { 0 };
5265
5266	if (sysfs_streq(buf, "")) {
5267		pdev->reset_methods[0] = 0;
5268		pci_warn(pdev, "All device reset methods disabled by user");
5269		return count;
5270	}
5271
5272	if (sysfs_streq(buf, "default")) {
5273		pci_init_reset_methods(pdev);
5274		return count;
5275	}
5276
5277	options = kstrndup(buf, count, GFP_KERNEL);
5278	if (!options)
5279		return -ENOMEM;
5280
5281	n = 0;
5282	while ((name = strsep(&options, " ")) != NULL) {
5283		if (sysfs_streq(name, ""))
5284			continue;
5285
5286		name = strim(name);
5287
5288		m = reset_method_lookup(name);
5289		if (!m) {
5290			pci_err(pdev, "Invalid reset method '%s'", name);
5291			goto error;
5292		}
5293
5294		if (pci_reset_fn_methods[m].reset_fn(pdev, PCI_RESET_PROBE)) {
5295			pci_err(pdev, "Unsupported reset method '%s'", name);
5296			goto error;
5297		}
5298
5299		if (n == PCI_NUM_RESET_METHODS - 1) {
5300			pci_err(pdev, "Too many reset methods\n");
5301			goto error;
5302		}
5303
5304		reset_methods[n++] = m;
5305	}
5306
5307	reset_methods[n] = 0;
5308
5309	/* Warn if dev-specific supported but not highest priority */
5310	if (pci_reset_fn_methods[1].reset_fn(pdev, PCI_RESET_PROBE) == 0 &&
5311	    reset_methods[0] != 1)
5312		pci_warn(pdev, "Device-specific reset disabled/de-prioritized by user");
5313	memcpy(pdev->reset_methods, reset_methods, sizeof(pdev->reset_methods));
5314	kfree(options);
5315	return count;
5316
5317error:
5318	/* Leave previous methods unchanged */
5319	kfree(options);
5320	return -EINVAL;
5321}
5322static DEVICE_ATTR_RW(reset_method);
5323
5324static struct attribute *pci_dev_reset_method_attrs[] = {
5325	&dev_attr_reset_method.attr,
5326	NULL,
5327};
5328
5329static umode_t pci_dev_reset_method_attr_is_visible(struct kobject *kobj,
5330						    struct attribute *a, int n)
5331{
5332	struct pci_dev *pdev = to_pci_dev(kobj_to_dev(kobj));
5333
5334	if (!pci_reset_supported(pdev))
5335		return 0;
5336
5337	return a->mode;
5338}
5339
5340const struct attribute_group pci_dev_reset_method_attr_group = {
5341	.attrs = pci_dev_reset_method_attrs,
5342	.is_visible = pci_dev_reset_method_attr_is_visible,
5343};
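/*
 * Minimal userspace sketch of driving the reset_method attribute defined
 * above: the attribute takes a space-separated list of method names (or
 * "default").  The device address and the chosen method list are purely
 * illustrative assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical BDF; substitute a device present on your system. */
	const char *attr = "/sys/bus/pci/devices/0000:03:00.0/reset_method";
	const char *methods = "flr bus";	/* prefer FLR, fall back to bus reset */
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, methods, strlen(methods)) < 0)
		perror("write");
	close(fd);
	return 0;
}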
5344
5345/**
5346 * __pci_reset_function_locked - reset a PCI device function while holding
5347 * the @dev mutex lock.
5348 * @dev: PCI device to reset
5349 *
5350 * Some devices allow an individual function to be reset without affecting
5351 * other functions in the same device.  The PCI device must be responsive
5352 * to PCI config space in order to use this function.
5353 *
5354 * The device function is presumed to be unused and the caller is holding
5355 * the device mutex lock when this function is called.
5356 *
5357 * Resetting the device will make the contents of PCI configuration space
5358 * random, so any caller of this must be prepared to reinitialise the
5359 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5360 * etc.
5361 *
5362 * Returns 0 if the device function was successfully reset or negative if the
5363 * device doesn't support resetting a single function.
5364 */
5365int __pci_reset_function_locked(struct pci_dev *dev)
5366{
5367	int i, m, rc;
5368
5369	might_sleep();
5370
5371	/*
5372	 * A reset method returns -ENOTTY if it doesn't support this device and
5373	 * we should try the next method.
5374	 *
5375	 * If it returns 0 (success), we're finished.  If it returns any other
5376	 * error, we're also finished: this indicates that further reset
5377	 * mechanisms might be broken on the device.
5378	 */
5379	for (i = 0; i < PCI_NUM_RESET_METHODS; i++) {
5380		m = dev->reset_methods[i];
5381		if (!m)
5382			return -ENOTTY;
5383
5384		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_DO_RESET);
5385		if (!rc)
5386			return 0;
5387		if (rc != -ENOTTY)
5388			return rc;
5389	}
5390
5391	return -ENOTTY;
5392}
5393EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5394
5395/**
5396 * pci_init_reset_methods - check whether device can be safely reset
5397 * and store supported reset mechanisms.
5398 * @dev: PCI device to check for reset mechanisms
5399 *
5400 * Some devices allow an individual function to be reset without affecting
5401 * other functions in the same device.  The PCI device must be in D0-D3hot
5402 * state.
5403 *
5404 * Stores reset mechanisms supported by device in reset_methods byte array
5405 * which is a member of struct pci_dev.
5406 */
5407void pci_init_reset_methods(struct pci_dev *dev)
5408{
5409	int m, i, rc;
5410
5411	BUILD_BUG_ON(ARRAY_SIZE(pci_reset_fn_methods) != PCI_NUM_RESET_METHODS);
5412
5413	might_sleep();
5414
5415	i = 0;
5416	for (m = 1; m < PCI_NUM_RESET_METHODS; m++) {
5417		rc = pci_reset_fn_methods[m].reset_fn(dev, PCI_RESET_PROBE);
5418		if (!rc)
5419			dev->reset_methods[i++] = m;
5420		else if (rc != -ENOTTY)
5421			break;
5422	}
5423
5424	dev->reset_methods[i] = 0;
5425}
5426
5427/**
5428 * pci_reset_function - quiesce and reset a PCI device function
5429 * @dev: PCI device to reset
5430 *
5431 * Some devices allow an individual function to be reset without affecting
5432 * other functions in the same device.  The PCI device must be responsive
5433 * to PCI config space in order to use this function.
5434 *
5435 * This function does not just reset the PCI portion of a device, but
5436 * clears all the state associated with the device.  This function differs
5437 * from __pci_reset_function_locked() in that it saves and restores device state
5438 * over the reset and takes the PCI device lock.
5439 *
5440 * Returns 0 if the device function was successfully reset or negative if the
5441 * device doesn't support resetting a single function.
5442 */
5443int pci_reset_function(struct pci_dev *dev)
5444{
5445	int rc;
5446
5447	if (!pci_reset_supported(dev))
5448		return -ENOTTY;
5449
5450	pci_dev_lock(dev);
5451	pci_dev_save_and_disable(dev);
5452
5453	rc = __pci_reset_function_locked(dev);
5454
5455	pci_dev_restore(dev);
5456	pci_dev_unlock(dev);
5457
5458	return rc;
5459}
5460EXPORT_SYMBOL_GPL(pci_reset_function);
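/*
 * Minimal sketch of a driver using pci_reset_function() to recover a
 * wedged device.  The helper name is hypothetical; a real caller must
 * also reprogram whatever device state the reset wiped out.
 */
#include <linux/pci.h>

static int example_recover_device(struct pci_dev *pdev)
{
	int ret;

	ret = pci_reset_function(pdev);	/* locks, saves, resets, restores */
	if (ret)
		dev_err(&pdev->dev, "function reset failed: %d\n", ret);
	return ret;
}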
5461
5462/**
5463 * pci_reset_function_locked - quiesce and reset a PCI device function
5464 * @dev: PCI device to reset
5465 *
5466 * Some devices allow an individual function to be reset without affecting
5467 * other functions in the same device.  The PCI device must be responsive
5468 * to PCI config space in order to use this function.
5469 *
5470 * This function does not just reset the PCI portion of a device, but
5471 * clears all the state associated with the device.  This function differs
5472 * from __pci_reset_function_locked() in that it saves and restores device state
5473 * over the reset.  It also differs from pci_reset_function() in that it
5474 * requires the PCI device lock to be held.
5475 *
5476 * Returns 0 if the device function was successfully reset or negative if the
5477 * device doesn't support resetting a single function.
5478 */
5479int pci_reset_function_locked(struct pci_dev *dev)
5480{
5481	int rc;
5482
5483	if (!pci_reset_supported(dev))
5484		return -ENOTTY;
5485
5486	pci_dev_save_and_disable(dev);
5487
5488	rc = __pci_reset_function_locked(dev);
5489
5490	pci_dev_restore(dev);
5491
5492	return rc;
5493}
5494EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5495
5496/**
5497 * pci_try_reset_function - quiesce and reset a PCI device function
5498 * @dev: PCI device to reset
5499 *
5500 * Same as above, except return -EAGAIN if unable to lock device.
5501 */
5502int pci_try_reset_function(struct pci_dev *dev)
5503{
5504	int rc;
5505
5506	if (!pci_reset_supported(dev))
5507		return -ENOTTY;
5508
5509	if (!pci_dev_trylock(dev))
5510		return -EAGAIN;
5511
5512	pci_dev_save_and_disable(dev);
5513	rc = __pci_reset_function_locked(dev);
5514	pci_dev_restore(dev);
5515	pci_dev_unlock(dev);
5516
5517	return rc;
5518}
5519EXPORT_SYMBOL_GPL(pci_try_reset_function);
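/*
 * Sketch: pci_try_reset_function() suits paths that must not block on the
 * device lock and can propagate -EAGAIN back to the caller.  The helper
 * shown here is an assumption for illustration only.
 */
#include <linux/pci.h>

static int example_nonblocking_reset(struct pci_dev *pdev)
{
	int ret = pci_try_reset_function(pdev);

	if (ret == -EAGAIN)
		dev_dbg(&pdev->dev, "device locked, try the reset again later\n");
	return ret;
}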
5520
5521/* Do any devices on or below this bus prevent a bus reset? */
5522static bool pci_bus_resetable(struct pci_bus *bus)
5523{
5524	struct pci_dev *dev;
5525
5526
5527	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5528		return false;
5529
5530	list_for_each_entry(dev, &bus->devices, bus_list) {
5531		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5532		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5533			return false;
5534	}
5535
5536	return true;
5537}
5538
5539/* Lock devices from the top of the tree down */
5540static void pci_bus_lock(struct pci_bus *bus)
5541{
5542	struct pci_dev *dev;
5543
5544	list_for_each_entry(dev, &bus->devices, bus_list) {
5545		pci_dev_lock(dev);
5546		if (dev->subordinate)
5547			pci_bus_lock(dev->subordinate);
5548	}
5549}
5550
5551/* Unlock devices from the bottom of the tree up */
5552static void pci_bus_unlock(struct pci_bus *bus)
5553{
5554	struct pci_dev *dev;
5555
5556	list_for_each_entry(dev, &bus->devices, bus_list) {
5557		if (dev->subordinate)
5558			pci_bus_unlock(dev->subordinate);
5559		pci_dev_unlock(dev);
5560	}
5561}
5562
5563/* Return 1 on successful lock, 0 on contention */
5564static int pci_bus_trylock(struct pci_bus *bus)
5565{
5566	struct pci_dev *dev;
5567
5568	list_for_each_entry(dev, &bus->devices, bus_list) {
5569		if (!pci_dev_trylock(dev))
5570			goto unlock;
5571		if (dev->subordinate) {
5572			if (!pci_bus_trylock(dev->subordinate)) {
5573				pci_dev_unlock(dev);
5574				goto unlock;
5575			}
5576		}
5577	}
5578	return 1;
5579
5580unlock:
5581	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5582		if (dev->subordinate)
5583			pci_bus_unlock(dev->subordinate);
5584		pci_dev_unlock(dev);
5585	}
5586	return 0;
5587}
5588
5589/* Do any devices on or below this slot prevent a bus reset? */
5590static bool pci_slot_resetable(struct pci_slot *slot)
5591{
5592	struct pci_dev *dev;
5593
5594	if (slot->bus->self &&
5595	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5596		return false;
5597
5598	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5599		if (!dev->slot || dev->slot != slot)
5600			continue;
5601		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5602		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5603			return false;
5604	}
5605
5606	return true;
5607}
5608
5609/* Lock devices from the top of the tree down */
5610static void pci_slot_lock(struct pci_slot *slot)
5611{
5612	struct pci_dev *dev;
5613
5614	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5615		if (!dev->slot || dev->slot != slot)
5616			continue;
5617		pci_dev_lock(dev);
5618		if (dev->subordinate)
5619			pci_bus_lock(dev->subordinate);
5620	}
5621}
5622
5623/* Unlock devices from the bottom of the tree up */
5624static void pci_slot_unlock(struct pci_slot *slot)
5625{
5626	struct pci_dev *dev;
5627
5628	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5629		if (!dev->slot || dev->slot != slot)
5630			continue;
5631		if (dev->subordinate)
5632			pci_bus_unlock(dev->subordinate);
5633		pci_dev_unlock(dev);
5634	}
5635}
5636
5637/* Return 1 on successful lock, 0 on contention */
5638static int pci_slot_trylock(struct pci_slot *slot)
5639{
5640	struct pci_dev *dev;
5641
5642	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5643		if (!dev->slot || dev->slot != slot)
5644			continue;
5645		if (!pci_dev_trylock(dev))
5646			goto unlock;
5647		if (dev->subordinate) {
5648			if (!pci_bus_trylock(dev->subordinate)) {
5649				pci_dev_unlock(dev);
5650				goto unlock;
5651			}
5652		}
5653	}
5654	return 1;
5655
5656unlock:
5657	list_for_each_entry_continue_reverse(dev,
5658					     &slot->bus->devices, bus_list) {
5659		if (!dev->slot || dev->slot != slot)
5660			continue;
5661		if (dev->subordinate)
5662			pci_bus_unlock(dev->subordinate);
5663		pci_dev_unlock(dev);
5664	}
5665	return 0;
5666}
5667
5668/*
5669 * Save and disable devices from the top of the tree down while holding
5670 * the @dev mutex lock for the entire tree.
5671 */
5672static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5673{
5674	struct pci_dev *dev;
5675
5676	list_for_each_entry(dev, &bus->devices, bus_list) {
5677		pci_dev_save_and_disable(dev);
5678		if (dev->subordinate)
5679			pci_bus_save_and_disable_locked(dev->subordinate);
5680	}
5681}
5682
5683/*
5684 * Restore devices from top of the tree down while holding @dev mutex lock
5685 * for the entire tree.  Parent bridges need to be restored before we can
5686 * get to subordinate devices.
5687 */
5688static void pci_bus_restore_locked(struct pci_bus *bus)
5689{
5690	struct pci_dev *dev;
5691
5692	list_for_each_entry(dev, &bus->devices, bus_list) {
5693		pci_dev_restore(dev);
5694		if (dev->subordinate)
5695			pci_bus_restore_locked(dev->subordinate);
5696	}
5697}
5698
5699/*
5700 * Save and disable devices from the top of the tree down while holding
5701 * the @dev mutex lock for the entire tree.
5702 */
5703static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5704{
5705	struct pci_dev *dev;
5706
5707	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5708		if (!dev->slot || dev->slot != slot)
5709			continue;
5710		pci_dev_save_and_disable(dev);
5711		if (dev->subordinate)
5712			pci_bus_save_and_disable_locked(dev->subordinate);
5713	}
5714}
5715
5716/*
5717 * Restore devices from top of the tree down while holding @dev mutex lock
5718 * for the entire tree.  Parent bridges need to be restored before we can
5719 * get to subordinate devices.
5720 */
5721static void pci_slot_restore_locked(struct pci_slot *slot)
5722{
5723	struct pci_dev *dev;
5724
5725	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5726		if (!dev->slot || dev->slot != slot)
5727			continue;
5728		pci_dev_restore(dev);
5729		if (dev->subordinate)
5730			pci_bus_restore_locked(dev->subordinate);
5731	}
5732}
5733
5734static int pci_slot_reset(struct pci_slot *slot, bool probe)
5735{
5736	int rc;
5737
5738	if (!slot || !pci_slot_resetable(slot))
5739		return -ENOTTY;
5740
5741	if (!probe)
5742		pci_slot_lock(slot);
5743
5744	might_sleep();
5745
5746	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5747
5748	if (!probe)
5749		pci_slot_unlock(slot);
5750
5751	return rc;
5752}
5753
5754/**
5755 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5756 * @slot: PCI slot to probe
5757 *
5758 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5759 */
5760int pci_probe_reset_slot(struct pci_slot *slot)
5761{
5762	return pci_slot_reset(slot, PCI_RESET_PROBE);
5763}
5764EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5765
5766/**
5767 * __pci_reset_slot - Try to reset a PCI slot
5768 * @slot: PCI slot to reset
5769 *
5770 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5771 * independent of other slots.  For instance, some slots may support slot power
5772 * control.  In the case of a 1:1 bus to slot architecture, this function may
5773 * wrap the bus reset to avoid spurious slot-related events such as hotplug.
5774 * Generally a slot reset should be attempted before a bus reset.  All of the
5775 * functions of the slot and any subordinate buses behind the slot are reset
5776 * through this function.  PCI config space of all devices in the slot and
5777 * behind the slot is saved before and restored after reset.
5778 *
5779 * Same as above except return -EAGAIN if the slot cannot be locked
5780 */
5781static int __pci_reset_slot(struct pci_slot *slot)
5782{
5783	int rc;
5784
5785	rc = pci_slot_reset(slot, PCI_RESET_PROBE);
5786	if (rc)
5787		return rc;
5788
5789	if (pci_slot_trylock(slot)) {
5790		pci_slot_save_and_disable_locked(slot);
5791		might_sleep();
5792		rc = pci_reset_hotplug_slot(slot->hotplug, PCI_RESET_DO_RESET);
5793		pci_slot_restore_locked(slot);
5794		pci_slot_unlock(slot);
5795	} else
5796		rc = -EAGAIN;
5797
5798	return rc;
5799}
5800
5801static int pci_bus_reset(struct pci_bus *bus, bool probe)
5802{
5803	int ret;
5804
5805	if (!bus->self || !pci_bus_resetable(bus))
5806		return -ENOTTY;
5807
5808	if (probe)
5809		return 0;
5810
5811	pci_bus_lock(bus);
5812
5813	might_sleep();
5814
5815	ret = pci_bridge_secondary_bus_reset(bus->self);
5816
5817	pci_bus_unlock(bus);
5818
5819	return ret;
5820}
5821
5822/**
5823 * pci_bus_error_reset - reset the bridge's subordinate bus
5824 * @bridge: The parent device that connects to the bus to reset
5825 *
5826 * This function will first try to reset the slots on this bus if the method is
5827 * available. If slot reset fails or is not available, this will fall back to a
5828 * secondary bus reset.
5829 */
5830int pci_bus_error_reset(struct pci_dev *bridge)
5831{
5832	struct pci_bus *bus = bridge->subordinate;
5833	struct pci_slot *slot;
5834
5835	if (!bus)
5836		return -ENOTTY;
5837
5838	mutex_lock(&pci_slot_mutex);
5839	if (list_empty(&bus->slots))
5840		goto bus_reset;
5841
5842	list_for_each_entry(slot, &bus->slots, list)
5843		if (pci_probe_reset_slot(slot))
5844			goto bus_reset;
5845
5846	list_for_each_entry(slot, &bus->slots, list)
5847		if (pci_slot_reset(slot, PCI_RESET_DO_RESET))
5848			goto bus_reset;
5849
5850	mutex_unlock(&pci_slot_mutex);
5851	return 0;
5852bus_reset:
5853	mutex_unlock(&pci_slot_mutex);
5854	return pci_bus_reset(bridge->subordinate, PCI_RESET_DO_RESET);
5855}
5856
5857/**
5858 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5859 * @bus: PCI bus to probe
5860 *
5861 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5862 */
5863int pci_probe_reset_bus(struct pci_bus *bus)
5864{
5865	return pci_bus_reset(bus, PCI_RESET_PROBE);
5866}
5867EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5868
5869/**
5870 * __pci_reset_bus - Try to reset a PCI bus
5871 * @bus: top level PCI bus to reset
5872 *
5873 * Same as above except return -EAGAIN if the bus cannot be locked
5874 */
5875static int __pci_reset_bus(struct pci_bus *bus)
5876{
5877	int rc;
5878
5879	rc = pci_bus_reset(bus, PCI_RESET_PROBE);
5880	if (rc)
5881		return rc;
5882
5883	if (pci_bus_trylock(bus)) {
5884		pci_bus_save_and_disable_locked(bus);
5885		might_sleep();
5886		rc = pci_bridge_secondary_bus_reset(bus->self);
5887		pci_bus_restore_locked(bus);
5888		pci_bus_unlock(bus);
5889	} else
5890		rc = -EAGAIN;
5891
5892	return rc;
5893}
5894
5895/**
5896 * pci_reset_bus - Try to reset a PCI bus
5897 * @pdev: top level PCI device to reset via slot/bus
5898 *
5899 * Same as above except return -EAGAIN if the bus cannot be locked
5900 */
5901int pci_reset_bus(struct pci_dev *pdev)
5902{
5903	return (!pci_probe_reset_slot(pdev->slot)) ?
5904	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5905}
5906EXPORT_SYMBOL_GPL(pci_reset_bus);
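/*
 * Sketch: resetting everything below a device's slot or bus, e.g. when a
 * whole multifunction device has to be reset at once.  pci_reset_bus()
 * prefers a slot reset when the slot supports one and otherwise falls
 * back to a secondary bus reset; the caller shown here is hypothetical.
 */
#include <linux/pci.h>

static int example_reset_whole_device(struct pci_dev *pdev)
{
	int ret = pci_reset_bus(pdev);

	if (ret)
		dev_warn(&pdev->dev, "slot/bus reset failed: %d\n", ret);
	return ret;
}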
5907
5908/**
5909 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5910 * @dev: PCI device to query
5911 *
5912 * Returns mmrbc: maximum designed memory read count in bytes or
5913 * appropriate error value.
5914 */
5915int pcix_get_max_mmrbc(struct pci_dev *dev)
5916{
5917	int cap;
5918	u32 stat;
5919
5920	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5921	if (!cap)
5922		return -EINVAL;
5923
5924	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5925		return -EINVAL;
5926
5927	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5928}
5929EXPORT_SYMBOL(pcix_get_max_mmrbc);
5930
5931/**
5932 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5933 * @dev: PCI device to query
5934 *
5935 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5936 * value.
5937 */
5938int pcix_get_mmrbc(struct pci_dev *dev)
5939{
5940	int cap;
5941	u16 cmd;
5942
5943	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5944	if (!cap)
5945		return -EINVAL;
5946
5947	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5948		return -EINVAL;
5949
5950	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5951}
5952EXPORT_SYMBOL(pcix_get_mmrbc);
5953
5954/**
5955 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5956 * @dev: PCI device to query
5957 * @mmrbc: maximum memory read count in bytes
5958 *    valid values are 512, 1024, 2048, 4096
5959 *
5960 * If possible, sets the maximum memory read byte count; some bridges have
5961 * errata that prevent this.
5962 */
5963int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5964{
5965	int cap;
5966	u32 stat, v, o;
5967	u16 cmd;
5968
5969	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5970		return -EINVAL;
5971
5972	v = ffs(mmrbc) - 10;
5973
5974	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5975	if (!cap)
5976		return -EINVAL;
5977
5978	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5979		return -EINVAL;
5980
5981	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5982		return -E2BIG;
5983
5984	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5985		return -EINVAL;
5986
5987	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5988	if (o != v) {
5989		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5990			return -EIO;
5991
5992		cmd &= ~PCI_X_CMD_MAX_READ;
5993		cmd |= v << 2;
5994		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5995			return -EIO;
5996	}
5997	return 0;
5998}
5999EXPORT_SYMBOL(pcix_set_mmrbc);
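/*
 * Sketch: raising a PCI-X device's memory read byte count toward its
 * designed maximum.  Whether that is desirable is device-specific; the
 * policy below is only an assumption, and pcix_set_mmrbc() still rejects
 * values the bus cannot take.
 */
#include <linux/pci.h>

static void example_tune_pcix_mmrbc(struct pci_dev *pdev)
{
	int max = pcix_get_max_mmrbc(pdev);
	int cur = pcix_get_mmrbc(pdev);

	if (max > 0 && cur > 0 && cur < max &&
	    pcix_set_mmrbc(pdev, max))
		dev_dbg(&pdev->dev, "could not raise MMRBC to %d\n", max);
}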
6000
6001/**
6002 * pcie_get_readrq - get PCI Express read request size
6003 * @dev: PCI device to query
6004 *
6005 * Returns maximum memory read request in bytes or appropriate error value.
6006 */
6007int pcie_get_readrq(struct pci_dev *dev)
6008{
6009	u16 ctl;
6010
6011	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6012
6013	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6014}
6015EXPORT_SYMBOL(pcie_get_readrq);
6016
6017/**
6018 * pcie_set_readrq - set PCI Express maximum memory read request
6019 * @dev: PCI device to query
6020 * @rq: maximum memory read count in bytes
6021 *    valid values are 128, 256, 512, 1024, 2048, 4096
6022 *
6023 * If possible, sets the maximum memory read request in bytes.
6024 */
6025int pcie_set_readrq(struct pci_dev *dev, int rq)
6026{
6027	u16 v;
6028	int ret;
6029
6030	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
6031		return -EINVAL;
6032
6033	/*
6034	 * If using the "performance" PCIe config, we clamp the read rq
6035	 * size to the max packet size to keep the host bridge from
6036	 * generating requests larger than we can cope with.
6037	 */
6038	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
6039		int mps = pcie_get_mps(dev);
6040
6041		if (mps < rq)
6042			rq = mps;
6043	}
6044
6045	v = (ffs(rq) - 8) << 12;
6046
6047	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6048						  PCI_EXP_DEVCTL_READRQ, v);
6049
6050	return pcibios_err_to_errno(ret);
6051}
6052EXPORT_SYMBOL(pcie_set_readrq);
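/*
 * Sketch: a driver capping its Max_Read_Request_Size, as some network
 * drivers do for fairness on shared links.  The 512-byte figure is an
 * illustrative choice, not a recommendation made by this file.
 */
#include <linux/pci.h>

static void example_cap_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}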
6053
6054/**
6055 * pcie_get_mps - get PCI Express maximum payload size
6056 * @dev: PCI device to query
6057 *
6058 * Returns maximum payload size in bytes
6059 */
6060int pcie_get_mps(struct pci_dev *dev)
6061{
6062	u16 ctl;
6063
6064	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
6065
6066	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6067}
6068EXPORT_SYMBOL(pcie_get_mps);
6069
6070/**
6071 * pcie_set_mps - set PCI Express maximum payload size
6072 * @dev: PCI device to query
6073 * @mps: maximum payload size in bytes
6074 *    valid values are 128, 256, 512, 1024, 2048, 4096
6075 *
6076 * If possible, sets the maximum payload size.
6077 */
6078int pcie_set_mps(struct pci_dev *dev, int mps)
6079{
6080	u16 v;
6081	int ret;
6082
6083	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
6084		return -EINVAL;
6085
6086	v = ffs(mps) - 8;
6087	if (v > dev->pcie_mpss)
6088		return -EINVAL;
6089	v <<= 5;
6090
6091	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
6092						  PCI_EXP_DEVCTL_PAYLOAD, v);
6093
6094	return pcibios_err_to_errno(ret);
6095}
6096EXPORT_SYMBOL(pcie_set_mps);
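/*
 * Sketch: clamping a device's Max_Payload_Size so it does not exceed the
 * value currently programmed in its upstream bridge.  Whether this is the
 * right policy depends on the platform; it is shown only as an example of
 * pairing pcie_get_mps() with pcie_set_mps().
 */
#include <linux/pci.h>

static void example_clamp_mps(struct pci_dev *pdev)
{
	struct pci_dev *bridge = pci_upstream_bridge(pdev);

	if (bridge && pcie_get_mps(pdev) > pcie_get_mps(bridge))
		pcie_set_mps(pdev, pcie_get_mps(bridge));
}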
6097
6098/**
6099 * pcie_bandwidth_available - determine minimum link settings of a PCIe
6100 *			      device and its bandwidth limitation
6101 * @dev: PCI device to query
6102 * @limiting_dev: storage for device causing the bandwidth limitation
6103 * @speed: storage for speed of limiting device
6104 * @width: storage for width of limiting device
6105 *
6106 * Walk up the PCI device chain and find the point where the minimum
6107 * bandwidth is available.  Return the bandwidth available there and (if
6108 * limiting_dev, speed, and width pointers are supplied) information about
6109 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
6110 * raw bandwidth.
6111 */
6112u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
6113			     enum pci_bus_speed *speed,
6114			     enum pcie_link_width *width)
6115{
6116	u16 lnksta;
6117	enum pci_bus_speed next_speed;
6118	enum pcie_link_width next_width;
6119	u32 bw, next_bw;
6120
6121	if (speed)
6122		*speed = PCI_SPEED_UNKNOWN;
6123	if (width)
6124		*width = PCIE_LNK_WIDTH_UNKNOWN;
6125
6126	bw = 0;
6127
6128	while (dev) {
6129		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
6130
6131		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
6132		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
6133			PCI_EXP_LNKSTA_NLW_SHIFT;
6134
6135		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
6136
6137		/* Check if current device limits the total bandwidth */
6138		if (!bw || next_bw <= bw) {
6139			bw = next_bw;
6140
6141			if (limiting_dev)
6142				*limiting_dev = dev;
6143			if (speed)
6144				*speed = next_speed;
6145			if (width)
6146				*width = next_width;
6147		}
6148
6149		dev = pci_upstream_bridge(dev);
6150	}
6151
6152	return bw;
6153}
6154EXPORT_SYMBOL(pcie_bandwidth_available);
6155
6156/**
6157 * pcie_get_speed_cap - query for the PCI device's link speed capability
6158 * @dev: PCI device to query
6159 *
6160 * Query the PCI device speed capability.  Return the maximum link speed
6161 * supported by the device.
6162 */
6163enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
6164{
6165	u32 lnkcap2, lnkcap;
6166
6167	/*
6168	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
6169	 * implementation note there recommends using the Supported Link
6170	 * Speeds Vector in Link Capabilities 2 when supported.
6171	 *
6172	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
6173	 * should use the Supported Link Speeds field in Link Capabilities,
6174	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
6175	 */
6176	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
6177
6178	/* PCIe r3.0-compliant */
6179	if (lnkcap2)
6180		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
6181
6182	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6183	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
6184		return PCIE_SPEED_5_0GT;
6185	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
6186		return PCIE_SPEED_2_5GT;
6187
6188	return PCI_SPEED_UNKNOWN;
6189}
6190EXPORT_SYMBOL(pcie_get_speed_cap);
6191
6192/**
6193 * pcie_get_width_cap - query for the PCI device's link width capability
6194 * @dev: PCI device to query
6195 *
6196 * Query the PCI device width capability.  Return the maximum link width
6197 * supported by the device.
6198 */
6199enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
6200{
6201	u32 lnkcap;
6202
6203	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
6204	if (lnkcap)
6205		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
6206
6207	return PCIE_LNK_WIDTH_UNKNOWN;
6208}
6209EXPORT_SYMBOL(pcie_get_width_cap);
6210
6211/**
6212 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
6213 * @dev: PCI device
6214 * @speed: storage for link speed
6215 * @width: storage for link width
6216 *
6217 * Calculate a PCI device's link bandwidth by querying for its link speed
6218 * and width, multiplying them, and applying encoding overhead.  The result
6219 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6220 */
6221u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6222			   enum pcie_link_width *width)
6223{
6224	*speed = pcie_get_speed_cap(dev);
6225	*width = pcie_get_width_cap(dev);
6226
6227	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6228		return 0;
6229
6230	return *width * PCIE_SPEED2MBS_ENC(*speed);
6231}
6232
6233/**
6234 * __pcie_print_link_status - Report the PCI device's link speed and width
6235 * @dev: PCI device to query
6236 * @verbose: Print info even when enough bandwidth is available
6237 *
6238 * If the available bandwidth at the device is less than the device is
6239 * capable of, report the device's maximum possible bandwidth and the
6240 * upstream link that limits its performance.  If @verbose, always print
6241 * the available bandwidth, even if the device isn't constrained.
6242 */
6243void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6244{
6245	enum pcie_link_width width, width_cap;
6246	enum pci_bus_speed speed, speed_cap;
6247	struct pci_dev *limiting_dev = NULL;
6248	u32 bw_avail, bw_cap;
6249
6250	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6251	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6252
6253	if (bw_avail >= bw_cap && verbose)
6254		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6255			 bw_cap / 1000, bw_cap % 1000,
6256			 pci_speed_string(speed_cap), width_cap);
6257	else if (bw_avail < bw_cap)
6258		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6259			 bw_avail / 1000, bw_avail % 1000,
6260			 pci_speed_string(speed), width,
6261			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6262			 bw_cap / 1000, bw_cap % 1000,
6263			 pci_speed_string(speed_cap), width_cap);
6264}
6265
6266/**
6267 * pcie_print_link_status - Report the PCI device's link speed and width
6268 * @dev: PCI device to query
6269 *
6270 * Report the available bandwidth at the device.
6271 */
6272void pcie_print_link_status(struct pci_dev *dev)
6273{
6274	__pcie_print_link_status(dev, true);
6275}
6276EXPORT_SYMBOL(pcie_print_link_status);
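/*
 * Sketch: a bandwidth-hungry device reporting its link during probe so
 * users can spot a card sitting behind a narrow or slow link.  The probe
 * routine is hypothetical; error handling beyond enable is omitted.
 */
#include <linux/pci.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret = pci_enable_device(pdev);

	if (ret)
		return ret;

	pcie_print_link_status(pdev);	/* logs available vs. capable bandwidth */
	return 0;
}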
6277
6278/**
6279 * pci_select_bars - Make BAR mask from the type of resource
6280 * @dev: the PCI device for which BAR mask is made
6281 * @flags: resource type mask to be selected
6282 *
6283 * This helper routine makes a BAR mask from the type of resource.
6284 */
6285int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6286{
6287	int i, bars = 0;
6288	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6289		if (pci_resource_flags(dev, i) & flags)
6290			bars |= (1 << i);
6291	return bars;
6292}
6293EXPORT_SYMBOL(pci_select_bars);
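/*
 * Sketch: pci_select_bars() is typically paired with
 * pci_request_selected_regions() to claim only the BARs of interest.
 * The driver name string is an assumption.
 */
#include <linux/pci.h>

static int example_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example_driver");
}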
6294
6295/* Some architectures require additional programming to enable VGA */
6296static arch_set_vga_state_t arch_set_vga_state;
6297
6298void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6299{
6300	arch_set_vga_state = func;	/* NULL disables */
6301}
6302
6303static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6304				  unsigned int command_bits, u32 flags)
6305{
6306	if (arch_set_vga_state)
6307		return arch_set_vga_state(dev, decode, command_bits,
6308						flags);
6309	return 0;
6310}
6311
6312/**
6313 * pci_set_vga_state - set VGA decode state on device and parents if requested
6314 * @dev: the PCI device
6315 * @decode: true = enable decoding, false = disable decoding
6316 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
6317 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE
6318 *	(traverse ancestors and change bridges)
6319 */
6320int pci_set_vga_state(struct pci_dev *dev, bool decode,
6321		      unsigned int command_bits, u32 flags)
6322{
6323	struct pci_bus *bus;
6324	struct pci_dev *bridge;
6325	u16 cmd;
6326	int rc;
6327
6328	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
6329
6330	/* ARCH specific VGA enables */
6331	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6332	if (rc)
6333		return rc;
6334
6335	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6336		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6337		if (decode)
6338			cmd |= command_bits;
6339		else
6340			cmd &= ~command_bits;
6341		pci_write_config_word(dev, PCI_COMMAND, cmd);
6342	}
6343
6344	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6345		return 0;
6346
6347	bus = dev->bus;
6348	while (bus) {
6349		bridge = bus->self;
6350		if (bridge) {
6351			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6352					     &cmd);
6353			if (decode)
6354				cmd |= PCI_BRIDGE_CTL_VGA;
6355			else
6356				cmd &= ~PCI_BRIDGE_CTL_VGA;
6357			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6358					      cmd);
6359		}
6360		bus = bus->parent;
6361	}
6362	return 0;
6363}
6364
6365#ifdef CONFIG_ACPI
6366bool pci_pr3_present(struct pci_dev *pdev)
6367{
6368	struct acpi_device *adev;
6369
6370	if (acpi_disabled)
6371		return false;
6372
6373	adev = ACPI_COMPANION(&pdev->dev);
6374	if (!adev)
6375		return false;
6376
6377	return adev->power.flags.power_resources &&
6378		acpi_has_method(adev->handle, "_PR3");
6379}
6380EXPORT_SYMBOL_GPL(pci_pr3_present);
6381#endif
6382
6383/**
6384 * pci_add_dma_alias - Add a DMA devfn alias for a device
6385 * @dev: the PCI device for which alias is added
6386 * @devfn_from: alias slot and function
6387 * @nr_devfns: number of subsequent devfns to alias
6388 *
6389 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6390 * which is used to program permissible bus-devfn source addresses for DMA
6391 * requests in an IOMMU.  These aliases factor into IOMMU group creation
6392 * and are useful for devices generating DMA requests beyond or different
6393 * from their logical bus-devfn.  Examples include device quirks where the
6394 * device simply uses the wrong devfn, as well as non-transparent bridges
6395 * where the alias may be a proxy for devices in another domain.
6396 *
6397 * IOMMU group creation is performed during device discovery or addition,
6398 * prior to any potential DMA mapping and therefore prior to driver probing
6399 * (especially for userspace assigned devices where IOMMU group definition
6400 * cannot be left as a userspace activity).  DMA aliases should therefore
6401 * be configured via quirks, such as the PCI fixup header quirk.
6402 */
6403void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from,
6404		       unsigned int nr_devfns)
6405{
6406	int devfn_to;
6407
6408	nr_devfns = min(nr_devfns, (unsigned int)MAX_NR_DEVFNS - devfn_from);
6409	devfn_to = devfn_from + nr_devfns - 1;
6410
6411	if (!dev->dma_alias_mask)
6412		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6413	if (!dev->dma_alias_mask) {
6414		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6415		return;
6416	}
6417
6418	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6419
6420	if (nr_devfns == 1)
6421		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6422				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6423	else if (nr_devfns > 1)
6424		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6425				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6426				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6427}
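/*
 * Sketch of a quirk registering a DMA alias, per the kernel-doc above.
 * The vendor/device IDs and the aliased function number are placeholders,
 * not a real quirk.
 */
#include <linux/pci.h>

static void example_quirk_dma_alias(struct pci_dev *pdev)
{
	/* This device also emits DMA as function 1 of the same slot. */
	pci_add_dma_alias(pdev, PCI_DEVFN(PCI_SLOT(pdev->devfn), 1), 1);
}
DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_quirk_dma_alias);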
6428
6429bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6430{
6431	return (dev1->dma_alias_mask &&
6432		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6433	       (dev2->dma_alias_mask &&
6434		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6435	       pci_real_dma_dev(dev1) == dev2 ||
6436	       pci_real_dma_dev(dev2) == dev1;
6437}
6438
6439bool pci_device_is_present(struct pci_dev *pdev)
6440{
6441	u32 v;
6442
6443	/* Check PF if pdev is a VF, since VF Vendor/Device IDs are 0xffff */
6444	pdev = pci_physfn(pdev);
6445	if (pci_dev_is_disconnected(pdev))
6446		return false;
6447	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6448}
6449EXPORT_SYMBOL_GPL(pci_device_is_present);
6450
6451void pci_ignore_hotplug(struct pci_dev *dev)
6452{
6453	struct pci_dev *bridge = dev->bus->self;
6454
6455	dev->ignore_hotplug = 1;
6456	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6457	if (bridge)
6458		bridge->ignore_hotplug = 1;
6459}
6460EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6461
6462/**
6463 * pci_real_dma_dev - Get PCI DMA device for PCI device
6464 * @dev: the PCI device that may have a PCI DMA alias
6465 *
6466 * Permits the platform to provide architecture-specific functionality to
6467 * devices needing to alias DMA to another PCI device on another PCI bus. If
6468 * the PCI device is on the same bus, it is recommended to use
6469 * pci_add_dma_alias(). This is the default implementation. Architecture
6470 * implementations can override this.
6471 */
6472struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6473{
6474	return dev;
6475}
6476
6477resource_size_t __weak pcibios_default_alignment(void)
6478{
6479	return 0;
6480}
6481
6482/*
6483 * Arches that don't want to expose struct resource to userland as-is in
6484 * sysfs and /proc can implement their own pci_resource_to_user().
6485 */
6486void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6487				 const struct resource *rsrc,
6488				 resource_size_t *start, resource_size_t *end)
6489{
6490	*start = rsrc->start;
6491	*end = rsrc->end;
6492}
6493
6494static char *resource_alignment_param;
6495static DEFINE_SPINLOCK(resource_alignment_lock);
6496
6497/**
6498 * pci_specified_resource_alignment - get resource alignment specified by user.
6499 * @dev: the PCI device to get
6500 * @resize: whether or not to change resources' size when reassigning alignment
6501 *
6502 * RETURNS: Resource alignment if it is specified.
6503 *          Zero if it is not specified.
6504 */
6505static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6506							bool *resize)
6507{
6508	int align_order, count;
6509	resource_size_t align = pcibios_default_alignment();
6510	const char *p;
6511	int ret;
6512
6513	spin_lock(&resource_alignment_lock);
6514	p = resource_alignment_param;
6515	if (!p || !*p)
6516		goto out;
6517	if (pci_has_flag(PCI_PROBE_ONLY)) {
6518		align = 0;
6519		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6520		goto out;
6521	}
6522
6523	while (*p) {
6524		count = 0;
6525		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6526		    p[count] == '@') {
6527			p += count + 1;
6528			if (align_order > 63) {
6529				pr_err("PCI: Invalid requested alignment (order %d)\n",
6530				       align_order);
6531				align_order = PAGE_SHIFT;
6532			}
6533		} else {
6534			align_order = PAGE_SHIFT;
6535		}
6536
6537		ret = pci_dev_str_match(dev, p, &p);
6538		if (ret == 1) {
6539			*resize = true;
6540			align = 1ULL << align_order;
6541			break;
6542		} else if (ret < 0) {
6543			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6544			       p);
6545			break;
6546		}
6547
6548		if (*p != ';' && *p != ',') {
6549			/* End of param or invalid format */
6550			break;
6551		}
6552		p++;
6553	}
6554out:
6555	spin_unlock(&resource_alignment_lock);
6556	return align;
6557}
6558
6559static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6560					   resource_size_t align, bool resize)
6561{
6562	struct resource *r = &dev->resource[bar];
6563	resource_size_t size;
6564
6565	if (!(r->flags & IORESOURCE_MEM))
6566		return;
6567
6568	if (r->flags & IORESOURCE_PCI_FIXED) {
6569		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6570			 bar, r, (unsigned long long)align);
6571		return;
6572	}
6573
6574	size = resource_size(r);
6575	if (size >= align)
6576		return;
6577
6578	/*
6579	 * Increase the alignment of the resource.  There are two ways we
6580	 * can do this:
6581	 *
6582	 * 1) Increase the size of the resource.  BARs are aligned on their
6583	 *    size, so when we reallocate space for this resource, we'll
6584	 *    allocate it with the larger alignment.  This also prevents
6585	 *    assignment of any other BARs inside the alignment region, so
6586	 *    if we're requesting page alignment, this means no other BARs
6587	 *    will share the page.
6588	 *
6589	 *    The disadvantage is that this makes the resource larger than
6590	 *    the hardware BAR, which may break drivers that compute things
6591	 *    based on the resource size, e.g., to find registers at a
6592	 *    fixed offset before the end of the BAR.
6593	 *
6594	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6595	 *    set r->start to the desired alignment.  By itself this
6596	 *    doesn't prevent other BARs being put inside the alignment
6597	 *    region, but if we realign *every* resource of every device in
6598	 *    the system, none of them will share an alignment region.
6599	 *
6600	 * When the user has requested alignment for only some devices via
6601	 * the "pci=resource_alignment" argument, "resize" is true and we
6602	 * use the first method.  Otherwise we assume we're aligning all
6603	 * devices and we use the second.
6604	 */
6605
6606	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6607		 bar, r, (unsigned long long)align);
6608
6609	if (resize) {
6610		r->start = 0;
6611		r->end = align - 1;
6612	} else {
6613		r->flags &= ~IORESOURCE_SIZEALIGN;
6614		r->flags |= IORESOURCE_STARTALIGN;
6615		r->start = align;
6616		r->end = r->start + size - 1;
6617	}
6618	r->flags |= IORESOURCE_UNSET;
6619}
6620
6621/*
6622 * This function disables memory decoding and releases memory resources
6623 * of the device specified by the kernel's boot parameter
6624 * 'pci=resource_alignment='.  It also rounds up the size to the specified
6625 * alignment.  Later on, the kernel will assign page-aligned memory
6626 * resources back to the device.
6627 */
6628void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6629{
6630	int i;
6631	struct resource *r;
6632	resource_size_t align;
6633	u16 command;
6634	bool resize = false;
6635
6636	/*
6637	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6638	 * 3.4.1.11.  Their resources are allocated from the space
6639	 * described by the VF BARx register in the PF's SR-IOV capability.
6640	 * We can't influence their alignment here.
6641	 */
6642	if (dev->is_virtfn)
6643		return;
6644
6645	/* check if specified PCI is target device to reassign */
6646	align = pci_specified_resource_alignment(dev, &resize);
6647	if (!align)
6648		return;
6649
6650	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6651	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6652		pci_warn(dev, "Can't reassign resources to host bridge\n");
6653		return;
6654	}
6655
6656	pci_read_config_word(dev, PCI_COMMAND, &command);
6657	command &= ~PCI_COMMAND_MEMORY;
6658	pci_write_config_word(dev, PCI_COMMAND, command);
6659
6660	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6661		pci_request_resource_alignment(dev, i, align, resize);
6662
6663	/*
6664	 * We need to disable the bridge's resource window
6665	 * in order to enable the kernel to reassign a new
6666	 * resource window later on.
6667	 */
6668	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6669		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6670			r = &dev->resource[i];
6671			if (!(r->flags & IORESOURCE_MEM))
6672				continue;
6673			r->flags |= IORESOURCE_UNSET;
6674			r->end = resource_size(r) - 1;
6675			r->start = 0;
6676		}
6677		pci_disable_bridge_window(dev);
6678	}
6679}
6680
6681static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6682{
6683	size_t count = 0;
6684
6685	spin_lock(&resource_alignment_lock);
6686	if (resource_alignment_param)
6687		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6688	spin_unlock(&resource_alignment_lock);
6689
6690	return count;
6691}
6692
6693static ssize_t resource_alignment_store(struct bus_type *bus,
6694					const char *buf, size_t count)
6695{
6696	char *param, *old, *end;
6697
6698	if (count >= (PAGE_SIZE - 1))
6699		return -EINVAL;
6700
6701	param = kstrndup(buf, count, GFP_KERNEL);
6702	if (!param)
6703		return -ENOMEM;
6704
6705	end = strchr(param, '\n');
6706	if (end)
6707		*end = '\0';
6708
6709	spin_lock(&resource_alignment_lock);
6710	old = resource_alignment_param;
6711	if (strlen(param)) {
6712		resource_alignment_param = param;
6713	} else {
6714		kfree(param);
6715		resource_alignment_param = NULL;
6716	}
6717	spin_unlock(&resource_alignment_lock);
6718
6719	kfree(old);
6720
6721	return count;
6722}
6723
6724static BUS_ATTR_RW(resource_alignment);
6725
6726static int __init pci_resource_alignment_sysfs_init(void)
6727{
6728	return bus_create_file(&pci_bus_type,
6729					&bus_attr_resource_alignment);
6730}
6731late_initcall(pci_resource_alignment_sysfs_init);
6732
6733static void pci_no_domains(void)
6734{
6735#ifdef CONFIG_PCI_DOMAINS
6736	pci_domains_supported = 0;
6737#endif
6738}
6739
6740#ifdef CONFIG_PCI_DOMAINS_GENERIC
6741static DEFINE_IDA(pci_domain_nr_static_ida);
6742static DEFINE_IDA(pci_domain_nr_dynamic_ida);
6743
6744static void of_pci_reserve_static_domain_nr(void)
6745{
6746	struct device_node *np;
6747	int domain_nr;
6748
6749	for_each_node_by_type(np, "pci") {
6750		domain_nr = of_get_pci_domain_nr(np);
6751		if (domain_nr < 0)
6752			continue;
6753		/*
6754		 * Permanently allocate domain_nr in dynamic_ida
6755		 * to prevent it from being dynamically allocated.
6756		 */
6757		ida_alloc_range(&pci_domain_nr_dynamic_ida,
6758				domain_nr, domain_nr, GFP_KERNEL);
6759	}
6760}
6761
6762static int of_pci_bus_find_domain_nr(struct device *parent)
6763{
6764	static bool static_domains_reserved = false;
6765	int domain_nr;
6766
6767	/* On the first call scan device tree for static allocations. */
6768	if (!static_domains_reserved) {
6769		of_pci_reserve_static_domain_nr();
6770		static_domains_reserved = true;
6771	}
6772
6773	if (parent) {
6774		/*
6775		 * If domain is in DT, allocate it in static IDA.  This
6776		 * prevents duplicate static allocations in case of errors
6777		 * in DT.
6778		 */
6779		domain_nr = of_get_pci_domain_nr(parent->of_node);
6780		if (domain_nr >= 0)
6781			return ida_alloc_range(&pci_domain_nr_static_ida,
6782					       domain_nr, domain_nr,
6783					       GFP_KERNEL);
6784	}
6785
6786	/*
6787	 * If domain was not specified in DT, choose a free ID from dynamic
6788	 * allocations. All domain numbers from DT are permanently in
6789	 * dynamic allocations to prevent assigning them to other DT nodes
6790	 * without a static domain.
6791	 */
6792	return ida_alloc(&pci_domain_nr_dynamic_ida, GFP_KERNEL);
6793}
6794
6795static void of_pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
6796{
6797	if (bus->domain_nr < 0)
6798		return;
6799
6800	/* Release domain from IDA where it was allocated. */
6801	if (of_get_pci_domain_nr(parent->of_node) == bus->domain_nr)
6802		ida_free(&pci_domain_nr_static_ida, bus->domain_nr);
6803	else
6804		ida_free(&pci_domain_nr_dynamic_ida, bus->domain_nr);
6805}
6806
6807int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6808{
6809	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6810			       acpi_pci_bus_find_domain_nr(bus);
6811}
6812
6813void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent)
6814{
6815	if (!acpi_disabled)
6816		return;
6817	of_pci_bus_release_domain_nr(bus, parent);
6818}
6819#endif
6820
6821/**
6822 * pci_ext_cfg_avail - can we access extended PCI config space?
6823 *
6824 * Returns 1 if we can access PCI extended config space (offsets
6825 * greater than 0xff). This is the default implementation. Architecture
6826 * implementations can override this.
6827 */
6828int __weak pci_ext_cfg_avail(void)
6829{
6830	return 1;
6831}
6832
6833void __weak pci_fixup_cardbus(struct pci_bus *bus)
6834{
6835}
6836EXPORT_SYMBOL(pci_fixup_cardbus);
6837
6838static int __init pci_setup(char *str)
6839{
6840	while (str) {
6841		char *k = strchr(str, ',');
6842		if (k)
6843			*k++ = 0;
6844		if (*str && (str = pcibios_setup(str)) && *str) {
6845			if (!strcmp(str, "nomsi")) {
6846				pci_no_msi();
6847			} else if (!strncmp(str, "noats", 5)) {
6848				pr_info("PCIe: ATS is disabled\n");
6849				pcie_ats_disabled = true;
6850			} else if (!strcmp(str, "noaer")) {
6851				pci_no_aer();
6852			} else if (!strcmp(str, "earlydump")) {
6853				pci_early_dump = true;
6854			} else if (!strncmp(str, "realloc=", 8)) {
6855				pci_realloc_get_opt(str + 8);
6856			} else if (!strncmp(str, "realloc", 7)) {
6857				pci_realloc_get_opt("on");
6858			} else if (!strcmp(str, "nodomains")) {
6859				pci_no_domains();
6860			} else if (!strncmp(str, "noari", 5)) {
6861				pcie_ari_disabled = true;
6862			} else if (!strncmp(str, "cbiosize=", 9)) {
6863				pci_cardbus_io_size = memparse(str + 9, &str);
6864			} else if (!strncmp(str, "cbmemsize=", 10)) {
6865				pci_cardbus_mem_size = memparse(str + 10, &str);
6866			} else if (!strncmp(str, "resource_alignment=", 19)) {
6867				resource_alignment_param = str + 19;
6868			} else if (!strncmp(str, "ecrc=", 5)) {
6869				pcie_ecrc_get_policy(str + 5);
6870			} else if (!strncmp(str, "hpiosize=", 9)) {
6871				pci_hotplug_io_size = memparse(str + 9, &str);
6872			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6873				pci_hotplug_mmio_size = memparse(str + 11, &str);
6874			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6875				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6876			} else if (!strncmp(str, "hpmemsize=", 10)) {
6877				pci_hotplug_mmio_size = memparse(str + 10, &str);
6878				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6879			} else if (!strncmp(str, "hpbussize=", 10)) {
6880				pci_hotplug_bus_size =
6881					simple_strtoul(str + 10, &str, 0);
6882				if (pci_hotplug_bus_size > 0xff)
6883					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6884			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6885				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6886			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6887				pcie_bus_config = PCIE_BUS_SAFE;
6888			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6889				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6890			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6891				pcie_bus_config = PCIE_BUS_PEER2PEER;
6892			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6893				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6894			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6895				disable_acs_redir_param = str + 18;
6896			} else {
6897				pr_err("PCI: Unknown option `%s'\n", str);
6898			}
6899		}
6900		str = k;
6901	}
6902	return 0;
6903}
6904early_param("pci", pci_setup);
6905
6906/*
6907 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6908 * in pci_setup(), above, to point to data in the __initdata section which
6909 * will be freed after the init sequence is complete. We can't allocate memory
6910 * in pci_setup() because some architectures do not have any memory allocation
6911 * service available during an early_param() call. So we allocate memory and
6912 * copy the variables here before the init section is freed.
6913 *
6914 */
6915static int __init pci_realloc_setup_params(void)
6916{
6917	resource_alignment_param = kstrdup(resource_alignment_param,
6918					   GFP_KERNEL);
6919	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6920
6921	return 0;
6922}
6923pure_initcall(pci_realloc_setup_params);