/*
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct list_head *tmp;
	unsigned char max, n;

	max = bus->subordinate;
	list_for_each(tmp, &bus->children) {
		n = pci_bus_max_busnr(pci_bus_b(tmp));
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif

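/*
 * Illustrative sketch (not part of the original file): how a driver's
 * probe() routine might use pci_ioremap_bar() above.  The "foo" names
 * are hypothetical and error handling is abbreviated.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* Map BAR 0; returns NULL if BAR 0 is not a memory BAR. */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* ... use readl()/writel() on regs; iounmap() on teardown ... */
	return 0;
}
#endif
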
#if 0
/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
	struct pci_bus *bus = NULL;
	unsigned char max, n;

	max = 0;
	while ((bus = pci_find_next_bus(bus)) != NULL) {
		n = pci_bus_max_busnr(bus);
		if (n > max)
			max = n;
	}
	return max;
}

#endif  /*  0  */

#define PCI_FIND_CAP_TTL	48

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;

	while ((*ttl)--) {
		pci_bus_read_config_byte(bus, devfn, pos, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
					 &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	default:
		return 0;
	}

	return 0;
}

/**
 * pci_find_capability - query for a device's capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}

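/*
 * Illustrative sketch (not part of the original file): locating a
 * capability and reading one of its registers.  A return value of 0 means
 * "not present"; any other value is a config-space offset.  The "foo"
 * name is hypothetical.
 */
#if 0
static void foo_dump_pm_cap(struct pci_dev *dev)
{
	int pos;
	u16 pmc;

	pos = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pos)
		return;		/* device has no PM capability */

	/* Registers of the capability live at fixed offsets from pos. */
	pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
	dev_info(&dev->dev, "PM cap at %#x, PMC %#06x\n", pos, pmc);
}
#endif
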
/**
 * pci_bus_find_capability - query for a device's capabilities
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

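/*
 * Illustrative sketch (not part of the original file): extended
 * capabilities work the same way but live above the first 256 bytes of
 * config space, so only PCIe devices can have them.  "foo" is
 * hypothetical.
 */
#if 0
static bool foo_has_aer(struct pci_dev *dev)
{
	/* Non-zero means the AER capability block was found. */
	return pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif
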
/**
 * pci_bus_find_ext_capability - find an extended capability
 * @bus:   the PCI bus to query
 * @devfn: PCI device to query
 * @cap:   capability code
 *
 * Like pci_find_ext_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
				int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
		return 0;
	if (header == 0xffffffff || header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
			break;
	}

	return 0;
}

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 *  For given resource region of given device, return the resource
 *  region of parent bus the given region is contained in or where
 *  it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	return best;
}

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void
pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
	    || !ops->sleep_wake || !ops->can_wakeup)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
}

static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
}

static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * We can enter D0 from any state, but we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	} else {
		error = -ENODEV;
		/* Fall back to PCI_D0 if native PM is not supported */
		if (!dev->pm_cap)
			dev->current_state = PCI_D0;
	}

	return error;
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0)
		pci_platform_power_transition(dev, PCI_D0);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	return state >= PCI_D0 ?
			pci_platform_power_transition(dev, state) : -EINVAL;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}

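/*
 * Illustrative sketch (not part of the original file): requests outside
 * D0..D3hot are clamped rather than rejected, so a driver can simply ask
 * for D3hot when it is done with the device.  "foo" is hypothetical.
 */
#if 0
static void foo_power_down(struct pci_dev *pdev)
{
	/* Returns 0 on success or if the device was already in D3hot. */
	if (pci_set_power_state(pdev, PCI_D3hot))
		dev_warn(&pdev->dev, "could not enter D3hot\n");
}
#endif
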
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);

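/*
 * Illustrative sketch (not part of the original file): the typical legacy
 * .suspend() pattern that pci_choose_state() was designed for.  The "foo"
 * name is hypothetical.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Let the platform (e.g. ACPI) pick the target state if it can. */
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif
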
#define PCI_EXP_SAVE_REGS	7

#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)

static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: - PCI device that we're dealing with
 */
int
pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;
	if ((i = pci_save_pcie_state(dev)) != 0)
		return i;
	if ((i = pci_save_pcix_state(dev)) != 0)
		return i;
	return 0;
}

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}

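/*
 * Illustrative sketch (not part of the original file): pci_save_state()
 * and pci_restore_state() bracketing an operation that clobbers config
 * space, such as a device-specific reset.  "foo" is hypothetical.
 */
#if 0
static void foo_reset(struct pci_dev *pdev)
{
	pci_save_state(pdev);
	/* ... issue a device-specific reset here ... */
	pci_restore_state(pdev);	/* no-op unless state was saved */
}
#endif
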
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

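/*
 * Illustrative sketch (not part of the original file): how the opaque
 * saved-state API might keep a known-good copy of a device's state across
 * an operation that may corrupt it.  "foo" is hypothetical.
 */
#if 0
static int foo_checkpoint(struct pci_dev *pdev)
{
	struct pci_saved_state *saved;

	pci_save_state(pdev);
	saved = pci_store_saved_state(pdev);	/* kzalloc'ed copy */
	if (!saved)
		return -ENOMEM;

	/* ... hand the device out; later, roll back: ... */

	pci_load_and_free_saved_state(pdev, &saved);	/* frees and NULLs */
	pci_restore_state(pdev);
	return 0;
}
#endif
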
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 *  Note this function is a backend of pci_default_resume and is not supposed
 *  to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}

static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable Memory resources. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 *  Initialize device before it's used by a driver. Ask low-level code
 *  to enable I/O and memory. Wake up the device if it was suspended.
 *  Beware, this function can fail.
 *
 *  Note we don't actually enable the device many times if we call
 *  this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}

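/*
 * Illustrative sketch (not part of the original file): the canonical
 * enable/disable pairing.  Because enable_cnt is reference counted, every
 * successful pci_enable_device() must be matched by pci_disable_device().
 * The "foo" names are hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);
	if (err)
		return err;
	/* ... request regions, map BARs, register the device ... */
	return 0;
}

static void foo_remove(struct pci_dev *pdev)
{
	/* ... undo the above ... */
	pci_disable_device(pdev);
}
#endif
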
/*
 * Managed PCI resources.  This manages device on/off, intx/msi/msix
 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}

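/*
 * Illustrative sketch (not part of the original file): with the managed
 * variant, the matching disable happens automatically in pcim_release()
 * when the driver detaches, so no explicit pci_disable_device() is needed
 * in the error or remove paths.  "foo" is hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);
	if (err)
		return err;
	/* ... on any later failure, devres undoes the enable for us ... */
	return 0;
}
#endif
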
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
							enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}

/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_pme_capable - check the capability of PCI device to generate PME#
 * @dev: PCI device to handle.
 * @state: PCI state from which device will issue PME#.
 */
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
{
	if (!dev->pm_cap)
		return false;

	return !!(dev->pme_support & (1 << state));
}

static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}

/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep. The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set. The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}

/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involve platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable.  To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);

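/*
 * Illustrative sketch (not part of the original file): drivers normally
 * reach this code through the pci_enable_wake() wrapper, which passes
 * runtime == false for system sleep.  "foo" is hypothetical.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Arm PME# (and any platform hook) if userspace allowed wakeup. */
	pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, PCI_D3hot);
}
#endif
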
/**
 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
 * @dev: PCI device to prepare
 * @enable: True to enable wake-up event generation; false to disable
 *
 * Many drivers want the device to wake up the system from D3_hot or D3_cold
 * and this function allows them to set that up cleanly - pci_enable_wake()
 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
 * ordering constraints.
 *
 * This function only returns an error code if the device is not capable of
 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
 * enable wake-up power for it.
 */
int pci_wake_from_d3(struct pci_dev *dev, bool enable)
{
	return pci_pme_capable(dev, PCI_D3cold) ?
			pci_enable_wake(dev, PCI_D3cold, enable) :
			pci_enable_wake(dev, PCI_D3hot, enable);
}

/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}

/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}

1757/**
1758 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1759 * @dev: Device to handle.
1760 *
1761 * Disable device's system wake-up capability and put it into D0.
1762 */
1763int pci_back_from_sleep(struct pci_dev *dev)
1764{
1765	pci_enable_wake(dev, PCI_D0, false);
1766	return pci_set_power_state(dev, PCI_D0);
1767}
1768
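/*
 * Example (editorial sketch): pci_prepare_to_sleep() and
 * pci_back_from_sleep() used as a matched pair in legacy PM hooks.
 * bar_suspend/bar_resume are hypothetical driver callbacks.
 */
#if 0	/* illustrative only */
static int bar_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* picks the state, arms wake-up */
}

static int bar_resume(struct pci_dev *pdev)
{
	pci_back_from_sleep(pdev);		/* back to D0, wake-up disarmed */
	pci_restore_state(pdev);
	return 0;
}
#endif
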
1769/**
1770 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1771 * @dev: PCI device being suspended.
1772 *
1773 * Prepare @dev to generate wake-up events at run time and put it into a low
1774 * power state.
1775 */
1776int pci_finish_runtime_suspend(struct pci_dev *dev)
1777{
1778	pci_power_t target_state = pci_target_state(dev);
1779	int error;
1780
1781	if (target_state == PCI_POWER_ERROR)
1782		return -EIO;
1783
1784	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1785
1786	error = pci_set_power_state(dev, target_state);
1787
1788	if (error)
1789		__pci_enable_wake(dev, target_state, true, false);
1790
1791	return error;
1792}
1793
1794/**
1795 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1796 * @dev: Device to check.
1797 *
1798 * Return true if the device itself is capable of generating wake-up events
1799 * (through the platform or using the native PCIe PME) or if the device supports
1800 * PME and one of its upstream bridges can generate wake-up events.
1801 */
1802bool pci_dev_run_wake(struct pci_dev *dev)
1803{
1804	struct pci_bus *bus = dev->bus;
1805
1806	if (device_run_wake(&dev->dev))
1807		return true;
1808
1809	if (!dev->pme_support)
1810		return false;
1811
1812	while (bus->parent) {
1813		struct pci_dev *bridge = bus->self;
1814
1815		if (device_run_wake(&bridge->dev))
1816			return true;
1817
1818		bus = bus->parent;
1819	}
1820
1821	/* We have reached the root bus. */
1822	if (bus->bridge)
1823		return device_run_wake(bus->bridge);
1824
1825	return false;
1826}
1827EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1828
1829/**
1830 * pci_pm_init - Initialize PM functions of given PCI device
1831 * @dev: PCI device to handle.
1832 */
1833void pci_pm_init(struct pci_dev *dev)
1834{
1835	int pm;
1836	u16 pmc;
1837
1838	pm_runtime_forbid(&dev->dev);
1839	device_enable_async_suspend(&dev->dev);
1840	dev->wakeup_prepared = false;
1841
1842	dev->pm_cap = 0;
1843
1844	/* find PCI PM capability in list */
1845	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1846	if (!pm)
1847		return;
1848	/* Check device's ability to generate PME# */
1849	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1850
1851	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1852		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1853			pmc & PCI_PM_CAP_VER_MASK);
1854		return;
1855	}
1856
1857	dev->pm_cap = pm;
1858	dev->d3_delay = PCI_PM_D3_WAIT;
1859
1860	dev->d1_support = false;
1861	dev->d2_support = false;
1862	if (!pci_no_d1d2(dev)) {
1863		if (pmc & PCI_PM_CAP_D1)
1864			dev->d1_support = true;
1865		if (pmc & PCI_PM_CAP_D2)
1866			dev->d2_support = true;
1867
1868		if (dev->d1_support || dev->d2_support)
1869			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1870				   dev->d1_support ? " D1" : "",
1871				   dev->d2_support ? " D2" : "");
1872	}
1873
1874	pmc &= PCI_PM_CAP_PME_MASK;
1875	if (pmc) {
1876		dev_printk(KERN_DEBUG, &dev->dev,
1877			 "PME# supported from%s%s%s%s%s\n",
1878			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1879			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1880			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1881			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1882			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1883		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1884		dev->pme_poll = true;
1885		/*
1886		 * Make device's PM flags reflect the wake-up capability, but
1887		 * let user space enable it to wake up the system as needed.
1888		 */
1889		device_set_wakeup_capable(&dev->dev, true);
1890		/* Disable the PME# generation functionality */
1891		pci_pme_active(dev, false);
1892	} else {
1893		dev->pme_support = 0;
1894	}
1895}
1896
1897/**
1898 * platform_pci_wakeup_init - init platform wakeup if present
1899 * @dev: PCI device
1900 *
1901 * Some devices don't have PCI PM caps but can still generate wakeup
1902 * events through platform methods (like ACPI events).  If @dev supports
1903 * platform wakeup events, set the device flag to indicate as much.  This
1904 * may be redundant if the device also supports PCI PM caps, but double
1905 * initialization should be safe in that case.
1906 */
1907void platform_pci_wakeup_init(struct pci_dev *dev)
1908{
1909	if (!platform_pci_can_wakeup(dev))
1910		return;
1911
1912	device_set_wakeup_capable(&dev->dev, true);
1913	platform_pci_sleep_wake(dev, false);
1914}
1915
1916static void pci_add_saved_cap(struct pci_dev *pci_dev,
1917	struct pci_cap_saved_state *new_cap)
1918{
1919	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
1920}
1921
1922/**
1923 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1924 * @dev: the PCI device
1925 * @cap: the capability to allocate the buffer for
1926 * @size: requested size of the buffer
1927 */
1928static int pci_add_cap_save_buffer(
1929	struct pci_dev *dev, char cap, unsigned int size)
1930{
1931	int pos;
1932	struct pci_cap_saved_state *save_state;
1933
1934	pos = pci_find_capability(dev, cap);
1935	if (pos <= 0)
1936		return 0;
1937
1938	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1939	if (!save_state)
1940		return -ENOMEM;
1941
1942	save_state->cap.cap_nr = cap;
1943	save_state->cap.size = size;
1944	pci_add_saved_cap(dev, save_state);
1945
1946	return 0;
1947}
1948
1949/**
1950 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1951 * @dev: the PCI device
1952 */
1953void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1954{
1955	int error;
1956
1957	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1958					PCI_EXP_SAVE_REGS * sizeof(u16));
1959	if (error)
1960		dev_err(&dev->dev,
1961			"unable to preallocate PCI Express save buffer\n");
1962
1963	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1964	if (error)
1965		dev_err(&dev->dev,
1966			"unable to preallocate PCI-X save buffer\n");
1967}
1968
1969void pci_free_cap_save_buffers(struct pci_dev *dev)
1970{
1971	struct pci_cap_saved_state *tmp;
1972	struct hlist_node *pos, *n;
1973
1974	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
1975		kfree(tmp);
1976}
1977
1978/**
1979 * pci_enable_ari - enable ARI forwarding if hardware supports it
1980 * @dev: the PCI device
1981 */
1982void pci_enable_ari(struct pci_dev *dev)
1983{
1984	int pos;
1985	u32 cap;
1986	u16 flags, ctrl;
1987	struct pci_dev *bridge;
1988
1989	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
1990		return;
1991
1992	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1993	if (!pos)
1994		return;
1995
1996	bridge = dev->bus->self;
1997	if (!bridge || !pci_is_pcie(bridge))
1998		return;
1999
2000	pos = pci_pcie_cap(bridge);
2001	if (!pos)
2002		return;
2003
2004	/* ARI is a PCIe v2 feature */
2005	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
2006	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
2007		return;
2008
2009	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
2010	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2011		return;
2012
2013	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
2014	ctrl |= PCI_EXP_DEVCTL2_ARI;
2015	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
2016
2017	bridge->ari_enabled = 1;
2018}
2019
2020/**
2021 * pci_enable_ido - enable ID-based ordering on a device
2022 * @dev: the PCI device
2023 * @type: which types of IDO to enable
2024 *
2025 * Enable ID-based ordering on @dev.  @type can contain the bits
2026 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
2027 * which types of transactions are allowed to be re-ordered.
2028 */
2029void pci_enable_ido(struct pci_dev *dev, unsigned long type)
2030{
2031	int pos;
2032	u16 ctrl;
2033
2034	pos = pci_pcie_cap(dev);
2035	if (!pos)
2036		return;
2037
2038	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2039	if (type & PCI_EXP_IDO_REQUEST)
2040		ctrl |= PCI_EXP_IDO_REQ_EN;
2041	if (type & PCI_EXP_IDO_COMPLETION)
2042		ctrl |= PCI_EXP_IDO_CMP_EN;
2043	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2044}
2045EXPORT_SYMBOL(pci_enable_ido);
2046
2047/**
2048 * pci_disable_ido - disable ID-based ordering on a device
2049 * @dev: the PCI device
2050 * @type: which types of IDO to disable
2051 */
2052void pci_disable_ido(struct pci_dev *dev, unsigned long type)
2053{
2054	int pos;
2055	u16 ctrl;
2056
2057	if (!pci_is_pcie(dev))
2058		return;
2059
2060	pos = pci_pcie_cap(dev);
2061	if (!pos)
2062		return;
2063
2064	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2065	if (type & PCI_EXP_IDO_REQUEST)
2066		ctrl &= ~PCI_EXP_IDO_REQ_EN;
2067	if (type & PCI_EXP_IDO_COMPLETION)
2068		ctrl &= ~PCI_EXP_IDO_CMP_EN;
2069	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2070}
2071EXPORT_SYMBOL(pci_disable_ido);
2072
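/*
 * Example (editorial sketch): opting a device into ID-based ordering for
 * both request and completion traffic; qux_setup_ido is hypothetical.
 */
#if 0	/* illustrative only */
static void qux_setup_ido(struct pci_dev *pdev)
{
	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}
#endif
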
2073/**
2074 * pci_enable_obff - enable optimized buffer flush/fill
2075 * @dev: PCI device
2076 * @type: type of signaling to use
2077 *
2078 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2079 * signaling if possible, falling back to message signaling only if
2080 * WAKE# isn't supported.  @type should indicate whether the PCIe link
2081 * should be brought out of L0s or L1 to send the message.  It should be
2082 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2083 *
2084 * If your device can benefit from receiving all messages, even at the
2085 * power cost of bringing the link back up from a low power state, use
2086 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2087 * preferred type).
2088 *
2089 * RETURNS:
2090 * Zero on success, appropriate error number on failure.
2091 */
2092int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2093{
2094	int pos;
2095	u32 cap;
2096	u16 ctrl;
2097	int ret;
2098
2099	if (!pci_is_pcie(dev))
2100		return -ENOTSUPP;
2101
2102	pos = pci_pcie_cap(dev);
2103	if (!pos)
2104		return -ENOTSUPP;
2105
2106	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2107	if (!(cap & PCI_EXP_OBFF_MASK))
2108		return -ENOTSUPP; /* no OBFF support at all */
2109
2110	/* Make sure the topology supports OBFF as well */
2111	if (dev->bus->self) {
2112		ret = pci_enable_obff(dev->bus->self, type);
2113		if (ret)
2114			return ret;
2115	}
2116
2117	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2118	if (cap & PCI_EXP_OBFF_WAKE)
2119		ctrl |= PCI_EXP_OBFF_WAKE_EN;
2120	else {
2121		switch (type) {
2122		case PCI_EXP_OBFF_SIGNAL_L0:
2123			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2124				ctrl |= PCI_EXP_OBFF_MSGA_EN;
2125			break;
2126		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2127			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2128			ctrl |= PCI_EXP_OBFF_MSGB_EN;
2129			break;
2130		default:
2131			WARN(1, "bad OBFF signal type\n");
2132			return -ENOTSUPP;
2133		}
2134	}
2135	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2136
2137	return 0;
2138}
2139EXPORT_SYMBOL(pci_enable_obff);
2140
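/*
 * Example (editorial sketch): requesting the power-friendly OBFF variant
 * and treating failure as non-fatal, since many topologies lack OBFF.
 */
#if 0	/* illustrative only */
static void qux_try_obff(struct pci_dev *pdev)
{
	int err = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);

	if (err)
		dev_dbg(&pdev->dev, "OBFF unavailable: %d\n", err);
}
#endif
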
2141/**
2142 * pci_disable_obff - disable optimized buffer flush/fill
2143 * @dev: PCI device
2144 *
2145 * Disable OBFF on @dev.
2146 */
2147void pci_disable_obff(struct pci_dev *dev)
2148{
2149	int pos;
2150	u16 ctrl;
2151
2152	if (!pci_is_pcie(dev))
2153		return;
2154
2155	pos = pci_pcie_cap(dev);
2156	if (!pos)
2157		return;
2158
2159	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2160	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2161	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2162}
2163EXPORT_SYMBOL(pci_disable_obff);
2164
2165/**
2166 * pci_ltr_supported - check whether a device supports LTR
2167 * @dev: PCI device
2168 *
2169 * RETURNS:
2170 * True if @dev supports latency tolerance reporting, false otherwise.
2171 */
2172bool pci_ltr_supported(struct pci_dev *dev)
2173{
2174	int pos;
2175	u32 cap;
2176
2177	if (!pci_is_pcie(dev))
2178		return false;
2179
2180	pos = pci_pcie_cap(dev);
2181	if (!pos)
2182		return false;
2183
2184	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2185
2186	return cap & PCI_EXP_DEVCAP2_LTR;
2187}
2188EXPORT_SYMBOL(pci_ltr_supported);
2189
2190/**
2191 * pci_enable_ltr - enable latency tolerance reporting
2192 * @dev: PCI device
2193 *
2194 * Enable LTR on @dev if possible, which means enabling it first on
2195 * upstream ports.
2196 *
2197 * RETURNS:
2198 * Zero on success, errno on failure.
2199 */
2200int pci_enable_ltr(struct pci_dev *dev)
2201{
2202	int pos;
2203	u16 ctrl;
2204	int ret;
2205
2206	if (!pci_ltr_supported(dev))
2207		return -ENOTSUPP;
2208
2209	pos = pci_pcie_cap(dev);
2210	if (!pos)
2211		return -ENOTSUPP;
2212
2213	/* Only primary function can enable/disable LTR */
2214	if (PCI_FUNC(dev->devfn) != 0)
2215		return -EINVAL;
2216
2217	/* Enable upstream ports first */
2218	if (dev->bus->self) {
2219		ret = pci_enable_ltr(dev->bus->self);
2220		if (ret)
2221			return ret;
2222	}
2223
2224	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2225	ctrl |= PCI_EXP_LTR_EN;
2226	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2227
2228	return 0;
2229}
2230EXPORT_SYMBOL(pci_enable_ltr);
2231
2232/**
2233 * pci_disable_ltr - disable latency tolerance reporting
2234 * @dev: PCI device
2235 */
2236void pci_disable_ltr(struct pci_dev *dev)
2237{
2238	int pos;
2239	u16 ctrl;
2240
2241	if (!pci_ltr_supported(dev))
2242		return;
2243
2244	pos = pci_pcie_cap(dev);
2245	if (!pos)
2246		return;
2247
2248	/* Only primary function can enable/disable LTR */
2249	if (PCI_FUNC(dev->devfn) != 0)
2250		return;
2251
2252	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2253	ctrl &= ~PCI_EXP_LTR_EN;
2254	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2255}
2256EXPORT_SYMBOL(pci_disable_ltr);
2257
2258static int __pci_ltr_scale(int *val)
2259{
2260	int scale = 0;
2261
2262	while (*val > 1023) {
2263		*val = (*val + 31) / 32;
2264		scale++;
2265	}
2266	return scale;
2267}
2268
2269/**
2270 * pci_set_ltr - set LTR latency values
2271 * @dev: PCI device
2272 * @snoop_lat_ns: snoop latency in nanoseconds
2273 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2274 *
2275 * Figure out the scale and set the LTR values accordingly.
2276 */
2277int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2278{
2279	int pos, ret, snoop_scale, nosnoop_scale;
2280	u16 val;
2281
2282	if (!pci_ltr_supported(dev))
2283		return -ENOTSUPP;
2284
2285	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2286	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2287
2288	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2289	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2290		return -EINVAL;
2291
2292	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2293	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2294		return -EINVAL;
2295
2296	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2297	if (!pos)
2298		return -ENOTSUPP;
2299
2300	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2301	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2302	if (ret)
2303		return -EIO;
2304
2305	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2306	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2307	if (ret)
2308		return -EIO;
2309
2310	return 0;
2311}
2312EXPORT_SYMBOL(pci_set_ltr);
2313
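/*
 * Example (editorial sketch): enabling LTR and advertising latency
 * tolerances.  The 10us/50us figures are made-up illustrations, not
 * recommendations.
 */
#if 0	/* illustrative only */
static void qux_setup_ltr(struct pci_dev *pdev)
{
	if (!pci_enable_ltr(pdev))
		pci_set_ltr(pdev, 10 * 1000, 50 * 1000);	/* ns */
}
#endif
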
2314static int pci_acs_enable;
2315
2316/**
2317 * pci_request_acs - ask for ACS to be enabled if supported
2318 */
2319void pci_request_acs(void)
2320{
2321	pci_acs_enable = 1;
2322}
2323
2324/**
2325 * pci_enable_acs - enable ACS if hardware supports it
2326 * @dev: the PCI device
2327 */
2328void pci_enable_acs(struct pci_dev *dev)
2329{
2330	int pos;
2331	u16 cap;
2332	u16 ctrl;
2333
2334	if (!pci_acs_enable)
2335		return;
2336
2337	if (!pci_is_pcie(dev))
2338		return;
2339
2340	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2341	if (!pos)
2342		return;
2343
2344	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2345	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2346
2347	/* Source Validation */
2348	ctrl |= (cap & PCI_ACS_SV);
2349
2350	/* P2P Request Redirect */
2351	ctrl |= (cap & PCI_ACS_RR);
2352
2353	/* P2P Completion Redirect */
2354	ctrl |= (cap & PCI_ACS_CR);
2355
2356	/* Upstream Forwarding */
2357	ctrl |= (cap & PCI_ACS_UF);
2358
2359	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2360}
2361
2362/**
2363 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2364 * @dev: the PCI device
2365 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2366 *
2367 * Perform INTx swizzling for a device behind one level of bridge.  This is
2368 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2369 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2370 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2371 * the PCI Express Base Specification, Revision 2.1)
2372 */
2373u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2374{
2375	int slot;
2376
2377	if (pci_ari_enabled(dev->bus))
2378		slot = 0;
2379	else
2380		slot = PCI_SLOT(dev->devfn);
2381
2382	return (((pin - 1) + slot) % 4) + 1;
2383}
2384
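/*
 * Worked example (editorial): a device in slot 3 asserting INTB (pin 2)
 * behind one bridge yields ((2 - 1) + 3) % 4 + 1 = 1, i.e. INTA on the
 * bridge's upstream side.
 */
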
2385int
2386pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2387{
2388	u8 pin;
2389
2390	pin = dev->pin;
2391	if (!pin)
2392		return -1;
2393
2394	while (!pci_is_root_bus(dev->bus)) {
2395		pin = pci_swizzle_interrupt_pin(dev, pin);
2396		dev = dev->bus->self;
2397	}
2398	*bridge = dev;
2399	return pin;
2400}
2401
2402/**
2403 * pci_common_swizzle - swizzle INTx all the way to root bridge
2404 * @dev: the PCI device
2405 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2406 *
2407 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2408 * bridges all the way up to a PCI root bus.
2409 */
2410u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2411{
2412	u8 pin = *pinp;
2413
2414	while (!pci_is_root_bus(dev->bus)) {
2415		pin = pci_swizzle_interrupt_pin(dev, pin);
2416		dev = dev->bus->self;
2417	}
2418	*pinp = pin;
2419	return PCI_SLOT(dev->devfn);
2420}
2421
2422/**
2423 *	pci_release_region - Release a PCI bar
2424 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2425 *	@bar: BAR to release
2426 *
2427 *	Releases the PCI I/O and memory resources previously reserved by a
2428 *	successful call to pci_request_region.  Call this function only
2429 *	after all use of the PCI regions has ceased.
2430 */
2431void pci_release_region(struct pci_dev *pdev, int bar)
2432{
2433	struct pci_devres *dr;
2434
2435	if (pci_resource_len(pdev, bar) == 0)
2436		return;
2437	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2438		release_region(pci_resource_start(pdev, bar),
2439				pci_resource_len(pdev, bar));
2440	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2441		release_mem_region(pci_resource_start(pdev, bar),
2442				pci_resource_len(pdev, bar));
2443
2444	dr = find_pci_dr(pdev);
2445	if (dr)
2446		dr->region_mask &= ~(1 << bar);
2447}
2448
2449/**
2450 *	__pci_request_region - Reserve PCI I/O and memory resource
2451 *	@pdev: PCI device whose resources are to be reserved
2452 *	@bar: BAR to be reserved
2453 *	@res_name: Name to be associated with resource.
2454 *	@exclusive: whether the region access is exclusive or not
2455 *
2456 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2457 *	being reserved by owner @res_name.  Do not access any
2458 *	address inside the PCI regions unless this call returns
2459 *	successfully.
2460 *
2461 *	If @exclusive is set, then the region is marked so that userspace
2462 *	is explicitly not allowed to map the resource via /dev/mem or
2463 * 	sysfs MMIO access.
2464 *
2465 *	Returns 0 on success, or %EBUSY on error.  A warning
2466 *	message is also printed on failure.
2467 */
2468static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2469									int exclusive)
2470{
2471	struct pci_devres *dr;
2472
2473	if (pci_resource_len(pdev, bar) == 0)
2474		return 0;
2475
2476	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2477		if (!request_region(pci_resource_start(pdev, bar),
2478			    pci_resource_len(pdev, bar), res_name))
2479			goto err_out;
2480	}
2481	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2482		if (!__request_mem_region(pci_resource_start(pdev, bar),
2483					pci_resource_len(pdev, bar), res_name,
2484					exclusive))
2485			goto err_out;
2486	}
2487
2488	dr = find_pci_dr(pdev);
2489	if (dr)
2490		dr->region_mask |= 1 << bar;
2491
2492	return 0;
2493
2494err_out:
2495	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2496		 &pdev->resource[bar]);
2497	return -EBUSY;
2498}
2499
2500/**
2501 *	pci_request_region - Reserve PCI I/O and memory resource
2502 *	@pdev: PCI device whose resources are to be reserved
2503 *	@bar: BAR to be reserved
2504 *	@res_name: Name to be associated with resource
2505 *
2506 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2507 *	being reserved by owner @res_name.  Do not access any
2508 *	address inside the PCI regions unless this call returns
2509 *	successfully.
2510 *
2511 *	Returns 0 on success, or %EBUSY on error.  A warning
2512 *	message is also printed on failure.
2513 */
2514int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2515{
2516	return __pci_request_region(pdev, bar, res_name, 0);
2517}
2518
2519/**
2520 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2521 *	@pdev: PCI device whose resources are to be reserved
2522 *	@bar: BAR to be reserved
2523 *	@res_name: Name to be associated with resource.
2524 *
2525 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2526 *	being reserved by owner @res_name.  Do not access any
2527 *	address inside the PCI regions unless this call returns
2528 *	successfully.
2529 *
2530 *	Returns 0 on success, or %EBUSY on error.  A warning
2531 *	message is also printed on failure.
2532 *
2533 *	The key difference that _exclusive makes is that userspace is
2534 *	explicitly not allowed to map the resource via /dev/mem or
2535 * 	sysfs.
2536 */
2537int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2538{
2539	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2540}
2541/**
2542 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2543 * @pdev: PCI device whose resources were previously reserved
2544 * @bars: Bitmask of BARs to be released
2545 *
2546 * Release selected PCI I/O and memory resources previously reserved.
2547 * Call this function only after all use of the PCI regions has ceased.
2548 */
2549void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2550{
2551	int i;
2552
2553	for (i = 0; i < 6; i++)
2554		if (bars & (1 << i))
2555			pci_release_region(pdev, i);
2556}
2557
2558int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2559				 const char *res_name, int excl)
2560{
2561	int i;
2562
2563	for (i = 0; i < 6; i++)
2564		if (bars & (1 << i))
2565			if (__pci_request_region(pdev, i, res_name, excl))
2566				goto err_out;
2567	return 0;
2568
2569err_out:
2570	while (--i >= 0)
2571		if (bars & (1 << i))
2572			pci_release_region(pdev, i);
2573
2574	return -EBUSY;
2575}
2576
2577
2578/**
2579 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2580 * @pdev: PCI device whose resources are to be reserved
2581 * @bars: Bitmask of BARs to be requested
2582 * @res_name: Name to be associated with resource
2583 */
2584int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2585				 const char *res_name)
2586{
2587	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2588}
2589
2590int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2591				 int bars, const char *res_name)
2592{
2593	return __pci_request_selected_regions(pdev, bars, res_name,
2594			IORESOURCE_EXCLUSIVE);
2595}
2596
2597/**
2598 *	pci_release_regions - Release reserved PCI I/O and memory resources
2599 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2600 *
2601 *	Releases all PCI I/O and memory resources previously reserved by a
2602 *	successful call to pci_request_regions.  Call this function only
2603 *	after all use of the PCI regions has ceased.
2604 */
2605
2606void pci_release_regions(struct pci_dev *pdev)
2607{
2608	pci_release_selected_regions(pdev, (1 << 6) - 1);
2609}
2610
2611/**
2612 *	pci_request_regions - Reserve PCI I/O and memory resources
2613 *	@pdev: PCI device whose resources are to be reserved
2614 *	@res_name: Name to be associated with resource.
2615 *
2616 *	Mark all PCI regions associated with PCI device @pdev as
2617 *	being reserved by owner @res_name.  Do not access any
2618 *	address inside the PCI regions unless this call returns
2619 *	successfully.
2620 *
2621 *	Returns 0 on success, or %EBUSY on error.  A warning
2622 *	message is also printed on failure.
2623 */
2624int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2625{
2626	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2627}
2628
2629/**
2630 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2631 *	@pdev: PCI device whose resources are to be reserved
2632 *	@res_name: Name to be associated with resource.
2633 *
2634 *	Mark all PCI regions associated with PCI device @pdev as
2635 *	being reserved by owner @res_name.  Do not access any
2636 *	address inside the PCI regions unless this call returns
2637 *	successfully.
2638 *
2639 *	pci_request_regions_exclusive() will mark the region so that
2640 * 	/dev/mem and the sysfs MMIO access will not be allowed.
2641 *
2642 *	Returns 0 on success, or %EBUSY on error.  A warning
2643 *	message is also printed on failure.
2644 */
2645int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2646{
2647	return pci_request_selected_regions_exclusive(pdev,
2648					((1 << 6) - 1), res_name);
2649}
2650
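/*
 * Example (editorial sketch): the usual probe()-time sequence built from
 * the region helpers above.  quux_probe and the "quux" owner string are
 * hypothetical; BAR 0 is assumed to be MMIO.
 */
#if 0	/* illustrative only */
static int quux_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "quux");
	if (err)
		goto err_disable;

	regs = pci_ioremap_bar(pdev, 0);
	if (!regs) {
		err = -ENOMEM;
		goto err_release;
	}

	pci_set_master(pdev);
	/* ... device-specific init using regs ... */
	return 0;

err_release:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return err;
}
#endif
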
2651static void __pci_set_master(struct pci_dev *dev, bool enable)
2652{
2653	u16 old_cmd, cmd;
2654
2655	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2656	if (enable)
2657		cmd = old_cmd | PCI_COMMAND_MASTER;
2658	else
2659		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2660	if (cmd != old_cmd) {
2661		dev_dbg(&dev->dev, "%s bus mastering\n",
2662			enable ? "enabling" : "disabling");
2663		pci_write_config_word(dev, PCI_COMMAND, cmd);
2664	}
2665	dev->is_busmaster = enable;
2666}
2667
2668/**
2669 * pcibios_set_master - enable PCI bus-mastering for device dev
2670 * @dev: the PCI device to enable
2671 *
2672 * Enables PCI bus-mastering for the device.  This is the default
2673 * implementation.  Architecture specific implementations can override
2674 * this if necessary.
2675 */
2676void __weak pcibios_set_master(struct pci_dev *dev)
2677{
2678	u8 lat;
2679
2680	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2681	if (pci_is_pcie(dev))
2682		return;
2683
2684	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2685	if (lat < 16)
2686		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2687	else if (lat > pcibios_max_latency)
2688		lat = pcibios_max_latency;
2689	else
2690		return;
2691	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2692	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2693}
2694
2695/**
2696 * pci_set_master - enables bus-mastering for device dev
2697 * @dev: the PCI device to enable
2698 *
2699 * Enables bus-mastering on the device and calls pcibios_set_master()
2700 * to do the needed arch specific settings.
2701 */
2702void pci_set_master(struct pci_dev *dev)
2703{
2704	__pci_set_master(dev, true);
2705	pcibios_set_master(dev);
2706}
2707
2708/**
2709 * pci_clear_master - disables bus-mastering for device dev
2710 * @dev: the PCI device to disable
2711 */
2712void pci_clear_master(struct pci_dev *dev)
2713{
2714	__pci_set_master(dev, false);
2715}
2716
2717/**
2718 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2719 * @dev: the PCI device for which MWI is to be enabled
2720 *
2721 * Helper function for pci_set_mwi.
2722 * Originally copied from drivers/net/acenic.c.
2723 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2724 *
2725 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2726 */
2727int pci_set_cacheline_size(struct pci_dev *dev)
2728{
2729	u8 cacheline_size;
2730
2731	if (!pci_cache_line_size)
2732		return -EINVAL;
2733
2734	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2735	   equal to or multiple of the right value. */
2736	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2737	if (cacheline_size >= pci_cache_line_size &&
2738	    (cacheline_size % pci_cache_line_size) == 0)
2739		return 0;
2740
2741	/* Write the correct value. */
2742	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2743	/* Read it back. */
2744	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2745	if (cacheline_size == pci_cache_line_size)
2746		return 0;
2747
2748	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2749		   "supported\n", pci_cache_line_size << 2);
2750
2751	return -EINVAL;
2752}
2753EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2754
2755#ifdef PCI_DISABLE_MWI
2756int pci_set_mwi(struct pci_dev *dev)
2757{
2758	return 0;
2759}
2760
2761int pci_try_set_mwi(struct pci_dev *dev)
2762{
2763	return 0;
2764}
2765
2766void pci_clear_mwi(struct pci_dev *dev)
2767{
2768}
2769
2770#else
2771
2772/**
2773 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2774 * @dev: the PCI device for which MWI is enabled
2775 *
2776 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2777 *
2778 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2779 */
2780int
2781pci_set_mwi(struct pci_dev *dev)
2782{
2783	int rc;
2784	u16 cmd;
2785
2786	rc = pci_set_cacheline_size(dev);
2787	if (rc)
2788		return rc;
2789
2790	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2791	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2792		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2793		cmd |= PCI_COMMAND_INVALIDATE;
2794		pci_write_config_word(dev, PCI_COMMAND, cmd);
2795	}
2796
2797	return 0;
2798}
2799
2800/**
2801 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2802 * @dev: the PCI device for which MWI is enabled
2803 *
2804 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2805 * Callers are not required to check the return value.
2806 *
2807 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2808 */
2809int pci_try_set_mwi(struct pci_dev *dev)
2810{
2811	int rc = pci_set_mwi(dev);
2812	return rc;
2813}
2814
2815/**
2816 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2817 * @dev: the PCI device to disable
2818 *
2819 * Disables PCI Memory-Write-Invalidate transaction on the device
2820 */
2821void
2822pci_clear_mwi(struct pci_dev *dev)
2823{
2824	u16 cmd;
2825
2826	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2827	if (cmd & PCI_COMMAND_INVALIDATE) {
2828		cmd &= ~PCI_COMMAND_INVALIDATE;
2829		pci_write_config_word(dev, PCI_COMMAND, cmd);
2830	}
2831}
2832#endif /* ! PCI_DISABLE_MWI */
2833
2834/**
2835 * pci_intx - enables/disables PCI INTx for device dev
2836 * @pdev: the PCI device to operate on
2837 * @enable: boolean: whether to enable or disable PCI INTx
2838 *
2839 * Enables/disables PCI INTx for device dev
2840 */
2841void
2842pci_intx(struct pci_dev *pdev, int enable)
2843{
2844	u16 pci_command, new;
2845
2846	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2847
2848	if (enable) {
2849		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2850	} else {
2851		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2852	}
2853
2854	if (new != pci_command) {
2855		struct pci_devres *dr;
2856
2857		pci_write_config_word(pdev, PCI_COMMAND, new);
2858
2859		dr = find_pci_dr(pdev);
2860		if (dr && !dr->restore_intx) {
2861			dr->restore_intx = 1;
2862			dr->orig_intx = !enable;
2863		}
2864	}
2865}
2866
2867/**
2868 * pci_intx_mask_supported - probe for INTx masking support
2869 * @dev: the PCI device to operate on
2870 *
2871 * Check if the device dev supports INTx masking via the config space
2872 * command word.
2873 */
2874bool pci_intx_mask_supported(struct pci_dev *dev)
2875{
2876	bool mask_supported = false;
2877	u16 orig, new;
2878
2879	pci_cfg_access_lock(dev);
2880
2881	pci_read_config_word(dev, PCI_COMMAND, &orig);
2882	pci_write_config_word(dev, PCI_COMMAND,
2883			      orig ^ PCI_COMMAND_INTX_DISABLE);
2884	pci_read_config_word(dev, PCI_COMMAND, &new);
2885
2886	/*
2887	 * There's no way to protect against hardware bugs or detect them
2888	 * reliably, but as long as we know what the value should be, let's
2889	 * go ahead and check it.
2890	 */
2891	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
2892		dev_err(&dev->dev, "Command register changed from "
2893			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
2894	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
2895		mask_supported = true;
2896		pci_write_config_word(dev, PCI_COMMAND, orig);
2897	}
2898
2899	pci_cfg_access_unlock(dev);
2900	return mask_supported;
2901}
2902EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2903
2904static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
2905{
2906	struct pci_bus *bus = dev->bus;
2907	bool mask_updated = true;
2908	u32 cmd_status_dword;
2909	u16 origcmd, newcmd;
2910	unsigned long flags;
2911	bool irq_pending;
2912
2913	/*
2914	 * We do a single dword read to retrieve both command and status.
2915	 * Document assumptions that make this possible.
2916	 */
2917	BUILD_BUG_ON(PCI_COMMAND % 4);
2918	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
2919
2920	raw_spin_lock_irqsave(&pci_lock, flags);
2921
2922	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
2923
2924	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
2925
2926	/*
2927	 * Check interrupt status register to see whether our device
2928	 * triggered the interrupt (when masking) or the next IRQ is
2929	 * already pending (when unmasking).
2930	 */
2931	if (mask != irq_pending) {
2932		mask_updated = false;
2933		goto done;
2934	}
2935
2936	origcmd = cmd_status_dword;
2937	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
2938	if (mask)
2939		newcmd |= PCI_COMMAND_INTX_DISABLE;
2940	if (newcmd != origcmd)
2941		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
2942
2943done:
2944	raw_spin_unlock_irqrestore(&pci_lock, flags);
2945
2946	return mask_updated;
2947}
2948
2949/**
2950 * pci_check_and_mask_intx - mask INTx on pending interrupt
2951 * @dev: the PCI device to operate on
2952 *
2953 * Check if the device dev has its INTx line asserted, mask it and
2954 * return true in that case. False is returned if no interrupt was
2955 * pending.
2956 */
2957bool pci_check_and_mask_intx(struct pci_dev *dev)
2958{
2959	return pci_check_and_set_intx_mask(dev, true);
2960}
2961EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2962
2963/**
2964 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
2965 * @dev: the PCI device to operate on
2966 *
2967 * Check if the device dev has its INTx line asserted, unmask it if not
2968 * and return true. False is returned and the mask remains active if
2969 * there was still an interrupt pending.
2970 */
2971bool pci_check_and_unmask_intx(struct pci_dev *dev)
2972{
2973	return pci_check_and_set_intx_mask(dev, false);
2974}
2975EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2976
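/*
 * Example (editorial sketch): a shared INTx handler using the masking
 * helpers above to claim only interrupts raised by this device.
 * quux_irq is hypothetical; real users (e.g. userspace I/O drivers)
 * often unmask later, from outside the handler.
 */
#if 0	/* illustrative only */
static irqreturn_t quux_irq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* not ours */

	/* ... clear the interrupt condition in the device ... */

	pci_check_and_unmask_intx(pdev);
	return IRQ_HANDLED;
}
#endif
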
2977/**
2978 * pci_msi_off - disables any msi or msix capabilities
2979 * @dev: the PCI device to operate on
2980 *
2981 * If you want to use msi see pci_enable_msi and friends.
2982 * This is a lower level primitive that allows us to disable
2983 * msi operation at the device level.
2984 */
2985void pci_msi_off(struct pci_dev *dev)
2986{
2987	int pos;
2988	u16 control;
2989
2990	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2991	if (pos) {
2992		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2993		control &= ~PCI_MSI_FLAGS_ENABLE;
2994		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2995	}
2996	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2997	if (pos) {
2998		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2999		control &= ~PCI_MSIX_FLAGS_ENABLE;
3000		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3001	}
3002}
3003EXPORT_SYMBOL_GPL(pci_msi_off);
3004
3005int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
3006{
3007	return dma_set_max_seg_size(&dev->dev, size);
3008}
3009EXPORT_SYMBOL(pci_set_dma_max_seg_size);
3010
3011int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
3012{
3013	return dma_set_seg_boundary(&dev->dev, mask);
3014}
3015EXPORT_SYMBOL(pci_set_dma_seg_boundary);
3016
3017static int pcie_flr(struct pci_dev *dev, int probe)
3018{
3019	int i;
3020	int pos;
3021	u32 cap;
3022	u16 status, control;
3023
3024	pos = pci_pcie_cap(dev);
3025	if (!pos)
3026		return -ENOTTY;
3027
3028	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
3029	if (!(cap & PCI_EXP_DEVCAP_FLR))
3030		return -ENOTTY;
3031
3032	if (probe)
3033		return 0;
3034
3035	/* Wait for Transaction Pending bit to clear */
3036	for (i = 0; i < 4; i++) {
3037		if (i)
3038			msleep((1 << (i - 1)) * 100);
3039
3040		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
3041		if (!(status & PCI_EXP_DEVSTA_TRPND))
3042			goto clear;
3043	}
3044
3045	dev_err(&dev->dev, "transaction is not cleared; "
3046			"proceeding with reset anyway\n");
3047
3048clear:
3049	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
3050	control |= PCI_EXP_DEVCTL_BCR_FLR;
3051	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
3052
3053	msleep(100);
3054
3055	return 0;
3056}
3057
3058static int pci_af_flr(struct pci_dev *dev, int probe)
3059{
3060	int i;
3061	int pos;
3062	u8 cap;
3063	u8 status;
3064
3065	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3066	if (!pos)
3067		return -ENOTTY;
3068
3069	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3070	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3071		return -ENOTTY;
3072
3073	if (probe)
3074		return 0;
3075
3076	/* Wait for Transaction Pending bit to clear */
3077	for (i = 0; i < 4; i++) {
3078		if (i)
3079			msleep((1 << (i - 1)) * 100);
3080
3081		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
3082		if (!(status & PCI_AF_STATUS_TP))
3083			goto clear;
3084	}
3085
3086	dev_err(&dev->dev, "transaction is not cleared; "
3087			"proceeding with reset anyway\n");
3088
3089clear:
3090	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3091	msleep(100);
3092
3093	return 0;
3094}
3095
3096/**
3097 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3098 * @dev: Device to reset.
3099 * @probe: If set, only check if the device can be reset this way.
3100 *
3101 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3102 * unset, it will be reinitialized internally when going from PCI_D3hot to
3103 * PCI_D0.  If that's the case and the device is not in a low-power state
3104 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3105 *
3106 * NOTE: This causes the caller to sleep for twice the device power transition
3107 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3108 * by default (i.e. unless the @dev's d3_delay field has a different value).
3109 * Moreover, only devices in D0 can be reset by this function.
3110 */
3111static int pci_pm_reset(struct pci_dev *dev, int probe)
3112{
3113	u16 csr;
3114
3115	if (!dev->pm_cap)
3116		return -ENOTTY;
3117
3118	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3119	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3120		return -ENOTTY;
3121
3122	if (probe)
3123		return 0;
3124
3125	if (dev->current_state != PCI_D0)
3126		return -EINVAL;
3127
3128	csr &= ~PCI_PM_CTRL_STATE_MASK;
3129	csr |= PCI_D3hot;
3130	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3131	pci_dev_d3_sleep(dev);
3132
3133	csr &= ~PCI_PM_CTRL_STATE_MASK;
3134	csr |= PCI_D0;
3135	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3136	pci_dev_d3_sleep(dev);
3137
3138	return 0;
3139}
3140
3141static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3142{
3143	u16 ctrl;
3144	struct pci_dev *pdev;
3145
3146	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
3147		return -ENOTTY;
3148
3149	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3150		if (pdev != dev)
3151			return -ENOTTY;
3152
3153	if (probe)
3154		return 0;
3155
3156	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
3157	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3158	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3159	msleep(100);
3160
3161	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3162	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
3163	msleep(100);
3164
3165	return 0;
3166}
3167
3168static int __pci_dev_reset(struct pci_dev *dev, int probe)
3169{
3170	int rc;
3171
3172	might_sleep();
3173
3174	rc = pci_dev_specific_reset(dev, probe);
3175	if (rc != -ENOTTY)
3176		goto done;
3177
3178	rc = pcie_flr(dev, probe);
3179	if (rc != -ENOTTY)
3180		goto done;
3181
3182	rc = pci_af_flr(dev, probe);
3183	if (rc != -ENOTTY)
3184		goto done;
3185
3186	rc = pci_pm_reset(dev, probe);
3187	if (rc != -ENOTTY)
3188		goto done;
3189
3190	rc = pci_parent_bus_reset(dev, probe);
3191done:
3192	return rc;
3193}
3194
3195static int pci_dev_reset(struct pci_dev *dev, int probe)
3196{
3197	int rc;
3198
3199	if (!probe) {
3200		pci_cfg_access_lock(dev);
3201		/* block PM suspend, driver probe, etc. */
3202		device_lock(&dev->dev);
3203	}
3204
3205	rc = __pci_dev_reset(dev, probe);
3206
3207	if (!probe) {
3208		device_unlock(&dev->dev);
3209		pci_cfg_access_unlock(dev);
3210	}
3211	return rc;
3212}
3213/**
3214 * __pci_reset_function - reset a PCI device function
3215 * @dev: PCI device to reset
3216 *
3217 * Some devices allow an individual function to be reset without affecting
3218 * other functions in the same device.  The PCI device must be responsive
3219 * to PCI config space in order to use this function.
3220 *
3221 * The device function is presumed to be unused when this function is called.
3222 * Resetting the device will make the contents of PCI configuration space
3223 * random, so any caller of this must be prepared to reinitialise the
3224 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3225 * etc.
3226 *
3227 * Returns 0 if the device function was successfully reset or negative if the
3228 * device doesn't support resetting a single function.
3229 */
3230int __pci_reset_function(struct pci_dev *dev)
3231{
3232	return pci_dev_reset(dev, 0);
3233}
3234EXPORT_SYMBOL_GPL(__pci_reset_function);
3235
3236/**
3237 * __pci_reset_function_locked - reset a PCI device function while holding
3238 * the @dev mutex lock.
3239 * @dev: PCI device to reset
3240 *
3241 * Some devices allow an individual function to be reset without affecting
3242 * other functions in the same device.  The PCI device must be responsive
3243 * to PCI config space in order to use this function.
3244 *
3245 * The device function is presumed to be unused and the caller is holding
3246 * the device mutex lock when this function is called.
3247 * Resetting the device will make the contents of PCI configuration space
3248 * random, so any caller of this must be prepared to reinitialise the
3249 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3250 * etc.
3251 *
3252 * Returns 0 if the device function was successfully reset or negative if the
3253 * device doesn't support resetting a single function.
3254 */
3255int __pci_reset_function_locked(struct pci_dev *dev)
3256{
3257	return __pci_dev_reset(dev, 0);
3258}
3259EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3260
3261/**
3262 * pci_probe_reset_function - check whether the device can be safely reset
3263 * @dev: PCI device to reset
3264 *
3265 * Some devices allow an individual function to be reset without affecting
3266 * other functions in the same device.  The PCI device must be responsive
3267 * to PCI config space in order to use this function.
3268 *
3269 * Returns 0 if the device function can be reset or negative if the
3270 * device doesn't support resetting a single function.
3271 */
3272int pci_probe_reset_function(struct pci_dev *dev)
3273{
3274	return pci_dev_reset(dev, 1);
3275}
3276
3277/**
3278 * pci_reset_function - quiesce and reset a PCI device function
3279 * @dev: PCI device to reset
3280 *
3281 * Some devices allow an individual function to be reset without affecting
3282 * other functions in the same device.  The PCI device must be responsive
3283 * to PCI config space in order to use this function.
3284 *
3285 * This function does not just reset the PCI portion of a device, but
3286 * clears all the state associated with the device.  This function differs
3287 * from __pci_reset_function in that it saves and restores device state
3288 * over the reset.
3289 *
3290 * Returns 0 if the device function was successfully reset or negative if the
3291 * device doesn't support resetting a single function.
3292 */
3293int pci_reset_function(struct pci_dev *dev)
3294{
3295	int rc;
3296
3297	rc = pci_dev_reset(dev, 1);
3298	if (rc)
3299		return rc;
3300
3301	pci_save_state(dev);
3302
3303	/*
3304	 * both INTx and MSI are disabled after the Interrupt Disable bit
3305	 * is set and the Bus Master bit is cleared.
3306	 */
3307	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3308
3309	rc = pci_dev_reset(dev, 0);
3310
3311	pci_restore_state(dev);
3312
3313	return rc;
3314}
3315EXPORT_SYMBOL_GPL(pci_reset_function);
3316
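/*
 * Example (editorial sketch): resetting a function before handing it to a
 * new user, bailing out cleanly when no reset method is available.
 * quux_reset is hypothetical.
 */
#if 0	/* illustrative only */
static int quux_reset(struct pci_dev *pdev)
{
	if (pci_probe_reset_function(pdev))
		return -ENODEV;		/* no usable reset method */
	return pci_reset_function(pdev);	/* saves/restores state itself */
}
#endif
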
3317/**
3318 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3319 * @dev: PCI device to query
3320 *
3321 * Returns mmrbc: maximum designed memory read count in bytes
3322 *    or appropriate error value.
3323 */
3324int pcix_get_max_mmrbc(struct pci_dev *dev)
3325{
3326	int cap;
3327	u32 stat;
3328
3329	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3330	if (!cap)
3331		return -EINVAL;
3332
3333	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3334		return -EINVAL;
3335
3336	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3337}
3338EXPORT_SYMBOL(pcix_get_max_mmrbc);
3339
3340/**
3341 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3342 * @dev: PCI device to query
3343 *
3344 * Returns mmrbc: maximum memory read count in bytes
3345 *    or appropriate error value.
3346 */
3347int pcix_get_mmrbc(struct pci_dev *dev)
3348{
3349	int cap;
3350	u16 cmd;
3351
3352	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3353	if (!cap)
3354		return -EINVAL;
3355
3356	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3357		return -EINVAL;
3358
3359	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3360}
3361EXPORT_SYMBOL(pcix_get_mmrbc);
3362
3363/**
3364 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3365 * @dev: PCI device to query
3366 * @mmrbc: maximum memory read count in bytes
3367 *    valid values are 512, 1024, 2048, 4096
3368 *
3369 * If possible sets maximum memory read byte count, some bridges have errata
3370 * that prevent this.
3371 */
3372int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3373{
3374	int cap;
3375	u32 stat, v, o;
3376	u16 cmd;
3377
3378	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3379		return -EINVAL;
3380
3381	v = ffs(mmrbc) - 10;
3382
3383	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3384	if (!cap)
3385		return -EINVAL;
3386
3387	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3388		return -EINVAL;
3389
3390	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3391		return -E2BIG;
3392
3393	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3394		return -EINVAL;
3395
3396	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3397	if (o != v) {
3398		if (v > o && dev->bus &&
3399		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3400			return -EIO;
3401
3402		cmd &= ~PCI_X_CMD_MAX_READ;
3403		cmd |= v << 2;
3404		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3405			return -EIO;
3406	}
3407	return 0;
3408}
3409EXPORT_SYMBOL(pcix_set_mmrbc);
3410
3411/**
3412 * pcie_get_readrq - get PCI Express read request size
3413 * @dev: PCI device to query
3414 *
3415 * Returns maximum memory read request in bytes
3416 *    or appropriate error value.
3417 */
3418int pcie_get_readrq(struct pci_dev *dev)
3419{
3420	int ret, cap;
3421	u16 ctl;
3422
3423	cap = pci_pcie_cap(dev);
3424	if (!cap)
3425		return -EINVAL;
3426
3427	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3428	if (!ret)
3429		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3430
3431	return ret;
3432}
3433EXPORT_SYMBOL(pcie_get_readrq);
3434
3435/**
3436 * pcie_set_readrq - set PCI Express maximum memory read request
3437 * @dev: PCI device to query
3438 * @rq: maximum memory read count in bytes
3439 *    valid values are 128, 256, 512, 1024, 2048, 4096
3440 *
3441 * If possible sets maximum memory read request in bytes
3442 */
3443int pcie_set_readrq(struct pci_dev *dev, int rq)
3444{
3445	int cap, err = -EINVAL;
3446	u16 ctl, v;
3447
3448	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3449		goto out;
3450
3451	cap = pci_pcie_cap(dev);
3452	if (!cap)
3453		goto out;
3454
3455	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3456	if (err)
3457		goto out;
3458	/*
3459	 * If using the "performance" PCIe config, we clamp the
3460	 * read rq size to the max packet size to prevent the
3461	 * host bridge generating requests larger than we can
3462	 * cope with
3463	 */
3464	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3465		int mps = pcie_get_mps(dev);
3466
3467		if (mps < 0)
3468			return mps;
3469		if (mps < rq)
3470			rq = mps;
3471	}
3472
3473	v = (ffs(rq) - 8) << 12;
3474
3475	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3476		ctl &= ~PCI_EXP_DEVCTL_READRQ;
3477		ctl |= v;
3478		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3479	}
3480
3481out:
3482	return err;
3483}
3484EXPORT_SYMBOL(pcie_set_readrq);
3485
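/*
 * Example (editorial sketch): clamping the read request size to a
 * hypothetical 512-byte device limitation.
 */
#if 0	/* illustrative only */
static void quux_clamp_readrq(struct pci_dev *pdev)
{
	int rrq = pcie_get_readrq(pdev);

	if (rrq > 512)
		pcie_set_readrq(pdev, 512);
}
#endif
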
3486/**
3487 * pcie_get_mps - get PCI Express maximum payload size
3488 * @dev: PCI device to query
3489 *
3490 * Returns maximum payload size in bytes
3491 *    or appropriate error value.
3492 */
3493int pcie_get_mps(struct pci_dev *dev)
3494{
3495	int ret, cap;
3496	u16 ctl;
3497
3498	cap = pci_pcie_cap(dev);
3499	if (!cap)
3500		return -EINVAL;
3501
3502	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3503	if (!ret)
3504		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3505
3506	return ret;
3507}
3508
3509/**
3510 * pcie_set_mps - set PCI Express maximum payload size
3511 * @dev: PCI device to query
3512 * @mps: maximum payload size in bytes
3513 *    valid values are 128, 256, 512, 1024, 2048, 4096
3514 *
3515 * If possible sets maximum payload size
3516 */
3517int pcie_set_mps(struct pci_dev *dev, int mps)
3518{
3519	int cap, err = -EINVAL;
3520	u16 ctl, v;
3521
3522	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3523		goto out;
3524
3525	v = ffs(mps) - 8;
3526	if (v > dev->pcie_mpss)
3527		goto out;
3528	v <<= 5;
3529
3530	cap = pci_pcie_cap(dev);
3531	if (!cap)
3532		goto out;
3533
3534	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3535	if (err)
3536		goto out;
3537
3538	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3539		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3540		ctl |= v;
3541		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3542	}
3543out:
3544	return err;
3545}
3546
3547/**
3548 * pci_select_bars - Make BAR mask from the type of resource
3549 * @dev: the PCI device for which BAR mask is made
3550 * @flags: resource type mask to be selected
3551 *
3552 * This helper routine makes bar mask from the type of resource.
3553 */
3554int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3555{
3556	int i, bars = 0;
3557	for (i = 0; i < PCI_NUM_RESOURCES; i++)
3558		if (pci_resource_flags(dev, i) & flags)
3559			bars |= (1 << i);
3560	return bars;
3561}
3562
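/*
 * Example (editorial sketch): requesting only the memory BARs of a
 * device; "quux" is a hypothetical owner string.
 */
#if 0	/* illustrative only */
static int quux_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "quux");
}
#endif
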
3563/**
3564 * pci_resource_bar - get position of the BAR associated with a resource
3565 * @dev: the PCI device
3566 * @resno: the resource number
3567 * @type: the BAR type to be filled in
3568 *
3569 * Returns BAR position in config space, or 0 if the BAR is invalid.
3570 */
3571int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3572{
3573	int reg;
3574
3575	if (resno < PCI_ROM_RESOURCE) {
3576		*type = pci_bar_unknown;
3577		return PCI_BASE_ADDRESS_0 + 4 * resno;
3578	} else if (resno == PCI_ROM_RESOURCE) {
3579		*type = pci_bar_mem32;
3580		return dev->rom_base_reg;
3581	} else if (resno < PCI_BRIDGE_RESOURCES) {
3582		/* device specific resource */
3583		reg = pci_iov_resource_bar(dev, resno, type);
3584		if (reg)
3585			return reg;
3586	}
3587
3588	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3589	return 0;
3590}
3591
3592/* Some architectures require additional programming to enable VGA */
3593static arch_set_vga_state_t arch_set_vga_state;
3594
3595void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3596{
3597	arch_set_vga_state = func;	/* NULL disables */
3598}
3599
3600static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3601		      unsigned int command_bits, u32 flags)
3602{
3603	if (arch_set_vga_state)
3604		return arch_set_vga_state(dev, decode, command_bits,
3605						flags);
3606	return 0;
3607}
3608
3609/**
3610 * pci_set_vga_state - set VGA decode state on device and parents if requested
3611 * @dev: the PCI device
3612 * @decode: true = enable decoding, false = disable decoding
3613 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3614 * @flags: traverse ancestors and change bridges
3615 * PCI_VGA_STATE_CHANGE_DECODES / PCI_VGA_STATE_CHANGE_BRIDGE
3616 */
3617int pci_set_vga_state(struct pci_dev *dev, bool decode,
3618		      unsigned int command_bits, u32 flags)
3619{
3620	struct pci_bus *bus;
3621	struct pci_dev *bridge;
3622	u16 cmd;
3623	int rc;
3624
3625	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3626
3627	/* ARCH specific VGA enables */
3628	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3629	if (rc)
3630		return rc;
3631
3632	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3633		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3634		if (decode)
3635			cmd |= command_bits;
3636		else
3637			cmd &= ~command_bits;
3638		pci_write_config_word(dev, PCI_COMMAND, cmd);
3639	}
3640
3641	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3642		return 0;
3643
3644	bus = dev->bus;
3645	while (bus) {
3646		bridge = bus->self;
3647		if (bridge) {
3648			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3649					     &cmd);
3650			if (decode)
3651				cmd |= PCI_BRIDGE_CTL_VGA;
3652			else
3653				cmd &= ~PCI_BRIDGE_CTL_VGA;
3654			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3655					      cmd);
3656		}
3657		bus = bus->parent;
3658	}
3659	return 0;
3660}
3661
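/*
 * Example: enabling VGA decoding on a device and on every bridge above
 * it, roughly as the VGA arbiter does.  Illustrative sketch only, not a
 * call site in this file.
 *
 *	pci_set_vga_state(pdev, true,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */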
3662#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3663static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3664static DEFINE_SPINLOCK(resource_alignment_lock);
3665
3666/**
3667 * pci_specified_resource_alignment - get resource alignment specified by user.
3668 * @dev: the PCI device to query
3669 *
3670 * RETURNS: Resource alignment if it is specified.
3671 *          Zero if it is not specified.
3672 */
3673resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3674{
3675	int seg, bus, slot, func, align_order, count;
3676	resource_size_t align = 0;
3677	char *p;
3678
3679	spin_lock(&resource_alignment_lock);
3680	p = resource_alignment_param;
3681	while (*p) {
3682		count = 0;
3683		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3684							p[count] == '@') {
3685			p += count + 1;
3686		} else {
3687			align_order = -1;
3688		}
3689		if (sscanf(p, "%x:%x:%x.%x%n",
3690			&seg, &bus, &slot, &func, &count) != 4) {
3691			seg = 0;
3692			if (sscanf(p, "%x:%x.%x%n",
3693					&bus, &slot, &func, &count) != 3) {
3694				/* Invalid format */
3695				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3696					p);
3697				break;
3698			}
3699		}
3700		p += count;
3701		if (seg == pci_domain_nr(dev->bus) &&
3702			bus == dev->bus->number &&
3703			slot == PCI_SLOT(dev->devfn) &&
3704			func == PCI_FUNC(dev->devfn)) {
3705			if (align_order == -1) {
3706				align = PAGE_SIZE;
3707			} else {
3708				align = 1 << align_order;
3709			}
3710			/* Found */
3711			break;
3712		}
3713		if (*p != ';' && *p != ',') {
3714			/* End of param or invalid format */
3715			break;
3716		}
3717		p++;
3718	}
3719	spin_unlock(&resource_alignment_lock);
3720	return align;
3721}
3722
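/*
 * Example of the boot parameter parsed above (illustrative):
 *
 *	pci=resource_alignment=20@0000:01:00.0;0000:02:00.0
 *
 * requests 2^20-byte (1 MiB) alignment for device 0000:01:00.0 and the
 * default PAGE_SIZE alignment for 0000:02:00.0.
 */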
3723/**
3724 * pci_is_reassigndev - check if the specified PCI device is a reassignment target
3725 * @dev: the PCI device to check
3726 *
3727 * RETURNS: non-zero if the PCI device is a target device to reassign,
3728 *          zero otherwise.
3729 */
3730int pci_is_reassigndev(struct pci_dev *dev)
3731{
3732	return (pci_specified_resource_alignment(dev) != 0);
3733}
3734
3735/*
3736 * This function disables memory decoding and releases memory resources
3737 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
3738 * It also rounds up size to specified alignment.
3739 * Later on, the kernel will assign page-aligned memory resource back
3740 * to the device.
3741 */
3742void pci_reassigndev_resource_alignment(struct pci_dev *dev)
3743{
3744	int i;
3745	struct resource *r;
3746	resource_size_t align, size;
3747	u16 command;
3748
3749	if (!pci_is_reassigndev(dev))
3750		return;
3751
3752	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
3753	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
3754		dev_warn(&dev->dev,
3755			"Can't reassign resources to host bridge.\n");
3756		return;
3757	}
3758
3759	dev_info(&dev->dev,
3760		"Disabling memory decoding and releasing memory resources.\n");
3761	pci_read_config_word(dev, PCI_COMMAND, &command);
3762	command &= ~PCI_COMMAND_MEMORY;
3763	pci_write_config_word(dev, PCI_COMMAND, command);
3764
3765	align = pci_specified_resource_alignment(dev);
3766	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
3767		r = &dev->resource[i];
3768		if (!(r->flags & IORESOURCE_MEM))
3769			continue;
3770		size = resource_size(r);
3771		if (size < align) {
3772			size = align;
3773			dev_info(&dev->dev,
3774				"Rounding up size of resource #%d to %#llx.\n",
3775				i, (unsigned long long)size);
3776		}
3777		r->end = size - 1;
3778		r->start = 0;
3779	}
3780	/* Need to disable bridge's resource window,
3781	 * to enable the kernel to reassign new resource
3782	 * window later on.
3783	 */
3784	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
3785	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
3786		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
3787			r = &dev->resource[i];
3788			if (!(r->flags & IORESOURCE_MEM))
3789				continue;
3790			r->end = resource_size(r) - 1;
3791			r->start = 0;
3792		}
3793		pci_disable_bridge_window(dev);
3794	}
3795}
3796
3797ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3798{
3799	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3800		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3801	spin_lock(&resource_alignment_lock);
3802	strncpy(resource_alignment_param, buf, count);
3803	resource_alignment_param[count] = '\0';
3804	spin_unlock(&resource_alignment_lock);
3805	return count;
3806}
3807
3808ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3809{
3810	size_t count;
3811	spin_lock(&resource_alignment_lock);
3812	count = snprintf(buf, size, "%s", resource_alignment_param);
3813	spin_unlock(&resource_alignment_lock);
3814	return count;
3815}
3816
3817static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3818{
3819	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3820}
3821
3822static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3823					const char *buf, size_t count)
3824{
3825	return pci_set_resource_alignment_param(buf, count);
3826}
3827
3828BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3829					pci_resource_alignment_store);
3830
3831static int __init pci_resource_alignment_sysfs_init(void)
3832{
3833	return bus_create_file(&pci_bus_type,
3834					&bus_attr_resource_alignment);
3835}
3836
3837late_initcall(pci_resource_alignment_sysfs_init);
3838
3839static void __devinit pci_no_domains(void)
3840{
3841#ifdef CONFIG_PCI_DOMAINS
3842	pci_domains_supported = 0;
3843#endif
3844}
3845
3846/**
3847 * pci_ext_cfg_avail - can we access extended PCI config space?
3848 * @dev: The PCI device of the root bridge.
3849 *
3850 * Returns 1 if we can access PCI extended config space (offsets
3851 * greater than 0xff). This is the default implementation. Architecture
3852 * implementations can override this.
3853 */
3854int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3855{
3856	return 1;
3857}
3858
3859void __weak pci_fixup_cardbus(struct pci_bus *bus)
3860{
3861}
3862EXPORT_SYMBOL(pci_fixup_cardbus);
3863
3864static int __init pci_setup(char *str)
3865{
3866	while (str) {
3867		char *k = strchr(str, ',');
3868		if (k)
3869			*k++ = 0;
3870		if (*str && (str = pcibios_setup(str)) && *str) {
3871			if (!strcmp(str, "nomsi")) {
3872				pci_no_msi();
3873			} else if (!strcmp(str, "noaer")) {
3874				pci_no_aer();
3875			} else if (!strncmp(str, "realloc=", 8)) {
3876				pci_realloc_get_opt(str + 8);
3877			} else if (!strncmp(str, "realloc", 7)) {
3878				pci_realloc_get_opt("on");
3879			} else if (!strcmp(str, "nodomains")) {
3880				pci_no_domains();
3881			} else if (!strncmp(str, "noari", 5)) {
3882				pcie_ari_disabled = true;
3883			} else if (!strncmp(str, "cbiosize=", 9)) {
3884				pci_cardbus_io_size = memparse(str + 9, &str);
3885			} else if (!strncmp(str, "cbmemsize=", 10)) {
3886				pci_cardbus_mem_size = memparse(str + 10, &str);
3887			} else if (!strncmp(str, "resource_alignment=", 19)) {
3888				pci_set_resource_alignment_param(str + 19,
3889							strlen(str + 19));
3890			} else if (!strncmp(str, "ecrc=", 5)) {
3891				pcie_ecrc_get_policy(str + 5);
3892			} else if (!strncmp(str, "hpiosize=", 9)) {
3893				pci_hotplug_io_size = memparse(str + 9, &str);
3894			} else if (!strncmp(str, "hpmemsize=", 10)) {
3895				pci_hotplug_mem_size = memparse(str + 10, &str);
3896			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3897				pcie_bus_config = PCIE_BUS_TUNE_OFF;
3898			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
3899				pcie_bus_config = PCIE_BUS_SAFE;
3900			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
3901				pcie_bus_config = PCIE_BUS_PERFORMANCE;
3902			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3903				pcie_bus_config = PCIE_BUS_PEER2PEER;
3904			} else if (!strncmp(str, "pcie_scan_all", 13)) {
3905				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
3906			} else {
3907				printk(KERN_ERR "PCI: Unknown option `%s'\n",
3908						str);
3909			}
3910		}
3911		str = k;
3912	}
3913	return 0;
3914}
3915early_param("pci", pci_setup);
3916
3917EXPORT_SYMBOL(pci_reenable_device);
3918EXPORT_SYMBOL(pci_enable_device_io);
3919EXPORT_SYMBOL(pci_enable_device_mem);
3920EXPORT_SYMBOL(pci_enable_device);
3921EXPORT_SYMBOL(pcim_enable_device);
3922EXPORT_SYMBOL(pcim_pin_device);
3923EXPORT_SYMBOL(pci_disable_device);
3924EXPORT_SYMBOL(pci_find_capability);
3925EXPORT_SYMBOL(pci_bus_find_capability);
3926EXPORT_SYMBOL(pci_release_regions);
3927EXPORT_SYMBOL(pci_request_regions);
3928EXPORT_SYMBOL(pci_request_regions_exclusive);
3929EXPORT_SYMBOL(pci_release_region);
3930EXPORT_SYMBOL(pci_request_region);
3931EXPORT_SYMBOL(pci_request_region_exclusive);
3932EXPORT_SYMBOL(pci_release_selected_regions);
3933EXPORT_SYMBOL(pci_request_selected_regions);
3934EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3935EXPORT_SYMBOL(pci_set_master);
3936EXPORT_SYMBOL(pci_clear_master);
3937EXPORT_SYMBOL(pci_set_mwi);
3938EXPORT_SYMBOL(pci_try_set_mwi);
3939EXPORT_SYMBOL(pci_clear_mwi);
3940EXPORT_SYMBOL_GPL(pci_intx);
3941EXPORT_SYMBOL(pci_assign_resource);
3942EXPORT_SYMBOL(pci_find_parent_resource);
3943EXPORT_SYMBOL(pci_select_bars);
3944
3945EXPORT_SYMBOL(pci_set_power_state);
3946EXPORT_SYMBOL(pci_save_state);
3947EXPORT_SYMBOL(pci_restore_state);
3948EXPORT_SYMBOL(pci_pme_capable);
3949EXPORT_SYMBOL(pci_pme_active);
3950EXPORT_SYMBOL(pci_wake_from_d3);
3951EXPORT_SYMBOL(pci_target_state);
3952EXPORT_SYMBOL(pci_prepare_to_sleep);
3953EXPORT_SYMBOL(pci_back_from_sleep);
3954EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCI Bus Services, see include/linux/pci.h for further explanation.
   4 *
   5 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   6 * David Mosberger-Tang
   7 *
   8 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   9 */
  10
  11#include <linux/acpi.h>
  12#include <linux/kernel.h>
  13#include <linux/delay.h>
  14#include <linux/dmi.h>
  15#include <linux/init.h>
  16#include <linux/msi.h>
  17#include <linux/of.h>
  18#include <linux/pci.h>
  19#include <linux/pm.h>
  20#include <linux/slab.h>
  21#include <linux/module.h>
  22#include <linux/spinlock.h>
  23#include <linux/string.h>
  24#include <linux/log2.h>
  25#include <linux/logic_pio.h>
  26#include <linux/pm_wakeup.h>
  27#include <linux/interrupt.h>
  28#include <linux/device.h>
  29#include <linux/pm_runtime.h>
  30#include <linux/pci_hotplug.h>
  31#include <linux/vmalloc.h>
  32#include <asm/dma.h>
  33#include <linux/aer.h>
  34#include "pci.h"
  35
  36DEFINE_MUTEX(pci_slot_mutex);
  37
  38const char *pci_power_names[] = {
  39	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  40};
  41EXPORT_SYMBOL_GPL(pci_power_names);
  42
  43int isa_dma_bridge_buggy;
  44EXPORT_SYMBOL(isa_dma_bridge_buggy);
  45
  46int pci_pci_problems;
  47EXPORT_SYMBOL(pci_pci_problems);
  48
  49unsigned int pci_pm_d3hot_delay;
  50
  51static void pci_pme_list_scan(struct work_struct *work);
  52
  53static LIST_HEAD(pci_pme_list);
  54static DEFINE_MUTEX(pci_pme_list_mutex);
  55static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  56
  57struct pci_pme_device {
  58	struct list_head list;
  59	struct pci_dev *dev;
  60};
  61
  62#define PME_TIMEOUT 1000 /* How long between PME checks */
  63
  64static void pci_dev_d3_sleep(struct pci_dev *dev)
  65{
  66	unsigned int delay = dev->d3hot_delay;
  67
  68	if (delay < pci_pm_d3hot_delay)
  69		delay = pci_pm_d3hot_delay;
  70
  71	if (delay)
  72		msleep(delay);
  73}
  74
  75#ifdef CONFIG_PCI_DOMAINS
  76int pci_domains_supported = 1;
  77#endif
  78
  79#define DEFAULT_CARDBUS_IO_SIZE		(256)
  80#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  81/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  82unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  83unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  84
  85#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  86#define DEFAULT_HOTPLUG_MMIO_SIZE	(2*1024*1024)
  87#define DEFAULT_HOTPLUG_MMIO_PREF_SIZE	(2*1024*1024)
  88/* hpiosize=nn can override this */
  89unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  90/*
  91 * pci=hpmmiosize=nnM overrides non-prefetchable MMIO size,
  92 * pci=hpmmioprefsize=nnM overrides prefetchable MMIO size;
  93 * pci=hpmemsize=nnM overrides both
  94 */
  95unsigned long pci_hotplug_mmio_size = DEFAULT_HOTPLUG_MMIO_SIZE;
  96unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
  97
  98#define DEFAULT_HOTPLUG_BUS_SIZE	1
  99unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
 100
 101
 102/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
 103#ifdef CONFIG_PCIE_BUS_TUNE_OFF
 104enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
 105#elif defined CONFIG_PCIE_BUS_SAFE
 106enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
 107#elif defined CONFIG_PCIE_BUS_PERFORMANCE
 108enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
 109#elif defined CONFIG_PCIE_BUS_PEER2PEER
 110enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
 111#else
 112enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
 113#endif
 114
 115/*
 116 * The default CLS is used if arch didn't set CLS explicitly and not
 117 * all pci devices agree on the same value.  Arch can override either
 118 * the dfl or actual value as it sees fit.  Don't forget this is
 119 * measured in 32-bit words, not bytes.
 120 */
 121u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
 122u8 pci_cache_line_size;
 123
 124/*
 125 * If we set up a device for bus mastering, we need to check the latency
 126 * timer as certain BIOSes forget to set it properly.
 127 */
 128unsigned int pcibios_max_latency = 255;
 129
 130/* If set, the PCIe ARI capability will not be used. */
 131static bool pcie_ari_disabled;
 132
 133/* If set, the PCIe ATS capability will not be used. */
 134static bool pcie_ats_disabled;
 135
 136/* If set, the PCI config space of each device is printed during boot. */
 137bool pci_early_dump;
 138
 139bool pci_ats_disabled(void)
 140{
 141	return pcie_ats_disabled;
 142}
 143EXPORT_SYMBOL_GPL(pci_ats_disabled);
 144
 145/* Disable bridge_d3 for all PCIe ports */
 146static bool pci_bridge_d3_disable;
 147/* Force bridge_d3 for all PCIe ports */
 148static bool pci_bridge_d3_force;
 149
 150static int __init pcie_port_pm_setup(char *str)
 151{
 152	if (!strcmp(str, "off"))
 153		pci_bridge_d3_disable = true;
 154	else if (!strcmp(str, "force"))
 155		pci_bridge_d3_force = true;
 156	return 1;
 157}
 158__setup("pcie_port_pm=", pcie_port_pm_setup);
 159
 160/* Time to wait after a reset for device to become responsive */
 161#define PCIE_RESET_READY_POLL_MS 60000
 162
 163/**
 164 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 165 * @bus: pointer to PCI bus structure to search
 166 *
 167 * Given a PCI bus, returns the highest PCI bus number present in the set
 168 * including the given PCI bus and its list of child PCI buses.
 169 */
 170unsigned char pci_bus_max_busnr(struct pci_bus *bus)
 171{
 172	struct pci_bus *tmp;
 173	unsigned char max, n;
 174
 175	max = bus->busn_res.end;
 176	list_for_each_entry(tmp, &bus->children, node) {
 177		n = pci_bus_max_busnr(tmp);
 178		if (n > max)
 179			max = n;
 180	}
 181	return max;
 182}
 183EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 184
 185/**
 186 * pci_status_get_and_clear_errors - return and clear error bits in PCI_STATUS
 187 * @pdev: the PCI device
 188 *
 189 * Returns error bits set in PCI_STATUS and clears them.
 190 */
 191int pci_status_get_and_clear_errors(struct pci_dev *pdev)
 192{
 193	u16 status;
 194	int ret;
 195
 196	ret = pci_read_config_word(pdev, PCI_STATUS, &status);
 197	if (ret != PCIBIOS_SUCCESSFUL)
 198		return -EIO;
 199
 200	status &= PCI_STATUS_ERROR_BITS;
 201	if (status)
 202		pci_write_config_word(pdev, PCI_STATUS, status);
 203
 204	return status;
 205}
 206EXPORT_SYMBOL_GPL(pci_status_get_and_clear_errors);
 207
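/*
 * Example: checking for and clearing latched error bits after a
 * transaction that may have failed.  Illustrative sketch only.
 *
 *	int errs = pci_status_get_and_clear_errors(pdev);
 *	if (errs > 0 && (errs & PCI_STATUS_DETECTED_PARITY))
 *		pci_warn(pdev, "parity error detected\n");
 */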
 208#ifdef CONFIG_HAS_IOMEM
 209void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 210{
 211	struct resource *res = &pdev->resource[bar];
 212
 213	/*
 214	 * Make sure the BAR is actually a memory resource, not an IO resource
 215	 */
 216	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
 217		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
 218		return NULL;
 219	}
 220	return ioremap(res->start, resource_size(res));
 221}
 222EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 223
 224void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
 225{
 226	/*
 227	 * Make sure the BAR is actually a memory resource, not an IO resource
 228	 */
 229	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 230		WARN_ON(1);
 231		return NULL;
 232	}
 233	return ioremap_wc(pci_resource_start(pdev, bar),
 234			  pci_resource_len(pdev, bar));
 235}
 236EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
 237#endif
 238
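/*
 * Example: mapping BAR 0 from a driver's probe() routine.  Illustrative
 * sketch; the iounmap() on the error/remove path is omitted.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */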
 239/**
 240 * pci_dev_str_match_path - test if a path string matches a device
 241 * @dev: the PCI device to test
 242 * @path: string to match the device against
 243 * @endptr: pointer to the string after the match
 244 *
 245 * Test if a string (typically from a kernel parameter) formatted as a
 246 * path of device/function addresses matches a PCI device. The string must
 247 * be of the form:
 248 *
 249 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 250 *
 251 * A path for a device can be obtained using 'lspci -t'.  Using a path
 252 * is more robust against bus renumbering than using only a single bus,
 253 * device and function address.
 254 *
 255 * Returns 1 if the string matches the device, 0 if it does not and
 256 * a negative error code if it fails to parse the string.
 257 */
 258static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
 259				  const char **endptr)
 260{
 261	int ret;
 262	int seg, bus, slot, func;
 263	char *wpath, *p;
 264	char end;
 265
 266	*endptr = strchrnul(path, ';');
 267
 268	wpath = kmemdup_nul(path, *endptr - path, GFP_ATOMIC);
 269	if (!wpath)
 270		return -ENOMEM;
 271
 272	while (1) {
 273		p = strrchr(wpath, '/');
 274		if (!p)
 275			break;
 276		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
 277		if (ret != 2) {
 278			ret = -EINVAL;
 279			goto free_and_exit;
 280		}
 281
 282		if (dev->devfn != PCI_DEVFN(slot, func)) {
 283			ret = 0;
 284			goto free_and_exit;
 285		}
 286
 287		/*
 288		 * Note: we don't need to get a reference to the upstream
 289		 * bridge because we hold a reference to the top level
 290		 * device which should hold a reference to the bridge,
 291		 * and so on.
 292		 */
 293		dev = pci_upstream_bridge(dev);
 294		if (!dev) {
 295			ret = 0;
 296			goto free_and_exit;
 297		}
 298
 299		*p = 0;
 300	}
 301
 302	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
 303		     &func, &end);
 304	if (ret != 4) {
 305		seg = 0;
 306		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
 307		if (ret != 3) {
 308			ret = -EINVAL;
 309			goto free_and_exit;
 310		}
 311	}
 312
 313	ret = (seg == pci_domain_nr(dev->bus) &&
 314	       bus == dev->bus->number &&
 315	       dev->devfn == PCI_DEVFN(slot, func));
 316
 317free_and_exit:
 318	kfree(wpath);
 319	return ret;
 320}
 321
 322/**
 323 * pci_dev_str_match - test if a string matches a device
 324 * @dev: the PCI device to test
 325 * @p: string to match the device against
 326 * @endptr: pointer to the string after the match
 327 *
 328 * Test if a string (typically from a kernel parameter) matches a specified
 329 * PCI device. The string may be of one of the following formats:
 330 *
 331 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 332 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 333 *
 334 * The first format specifies a PCI bus/device/function address which
 335 * may change if new hardware is inserted, if motherboard firmware changes,
 336 * or due to changes caused in kernel parameters. If the domain is
 337 * left unspecified, it is taken to be 0.  In order to be robust against
 338 * bus renumbering issues, a path of PCI device/function numbers may be used
 339 * to address the specific device.  The path for a device can be determined
 340 * through the use of 'lspci -t'.
 341 *
 342 * The second format matches devices using IDs in the configuration
 343 * space which may match multiple devices in the system. A value of 0
 344 * for any field will match all devices. (Note: this differs from
 345 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 346 * legacy reasons and convenience so users don't have to specify
 347 * FFFFFFFFs on the command line.)
 348 *
 349 * Returns 1 if the string matches the device, 0 if it does not and
 350 * a negative error code if the string cannot be parsed.
 351 */
 352static int pci_dev_str_match(struct pci_dev *dev, const char *p,
 353			     const char **endptr)
 354{
 355	int ret;
 356	int count;
 357	unsigned short vendor, device, subsystem_vendor, subsystem_device;
 358
 359	if (strncmp(p, "pci:", 4) == 0) {
 360		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
 361		p += 4;
 362		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
 363			     &subsystem_vendor, &subsystem_device, &count);
 364		if (ret != 4) {
 365			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
 366			if (ret != 2)
 367				return -EINVAL;
 368
 369			subsystem_vendor = 0;
 370			subsystem_device = 0;
 371		}
 372
 373		p += count;
 374
 375		if ((!vendor || vendor == dev->vendor) &&
 376		    (!device || device == dev->device) &&
 377		    (!subsystem_vendor ||
 378			    subsystem_vendor == dev->subsystem_vendor) &&
 379		    (!subsystem_device ||
 380			    subsystem_device == dev->subsystem_device))
 381			goto found;
 382	} else {
 383		/*
 384		 * PCI Bus, Device, Function IDs are specified
 385		 * (optionally, may include a path of devfns following it)
 386		 */
 387		ret = pci_dev_str_match_path(dev, p, &p);
 388		if (ret < 0)
 389			return ret;
 390		else if (ret)
 391			goto found;
 392	}
 393
 394	*endptr = p;
 395	return 0;
 396
 397found:
 398	*endptr = p;
 399	return 1;
 400}
 401
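/*
 * Examples of strings accepted by pci_dev_str_match() (illustrative):
 *
 *	"0000:00:1c.4/0.0"	device 0, function 0 below root port 00:1c.4
 *	"01:00.0"		domain defaults to 0
 *	"pci:8086:9d18"		vendor/device IDs; 0 in a field matches all
 */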
 402static u8 __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 403				  u8 pos, int cap, int *ttl)
 404{
 405	u8 id;
 406	u16 ent;
 407
 408	pci_bus_read_config_byte(bus, devfn, pos, &pos);
 409
 410	while ((*ttl)--) {
 411		if (pos < 0x40)
 412			break;
 413		pos &= ~3;
 414		pci_bus_read_config_word(bus, devfn, pos, &ent);
 415
 416		id = ent & 0xff;
 417		if (id == 0xff)
 418			break;
 419		if (id == cap)
 420			return pos;
 421		pos = (ent >> 8);
 422	}
 423	return 0;
 424}
 425
 426static u8 __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 427			      u8 pos, int cap)
 428{
 429	int ttl = PCI_FIND_CAP_TTL;
 430
 431	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 432}
 433
 434u8 pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 435{
 436	return __pci_find_next_cap(dev->bus, dev->devfn,
 437				   pos + PCI_CAP_LIST_NEXT, cap);
 438}
 439EXPORT_SYMBOL_GPL(pci_find_next_capability);
 440
 441static u8 __pci_bus_find_cap_start(struct pci_bus *bus,
 442				    unsigned int devfn, u8 hdr_type)
 443{
 444	u16 status;
 445
 446	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 447	if (!(status & PCI_STATUS_CAP_LIST))
 448		return 0;
 449
 450	switch (hdr_type) {
 451	case PCI_HEADER_TYPE_NORMAL:
 452	case PCI_HEADER_TYPE_BRIDGE:
 453		return PCI_CAPABILITY_LIST;
 454	case PCI_HEADER_TYPE_CARDBUS:
 455		return PCI_CB_CAPABILITY_LIST;
 456	}
 457
 458	return 0;
 459}
 460
 461/**
 462 * pci_find_capability - query for devices' capabilities
 463 * @dev: PCI device to query
 464 * @cap: capability code
 465 *
 466 * Tell if a device supports a given PCI capability.
 467 * Returns the address of the requested capability structure within the
 468 * device's PCI configuration space or 0 in case the device does not
 469 * support it.  Possible values for @cap include:
 470 *
 471 *  %PCI_CAP_ID_PM           Power Management
 472 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 473 *  %PCI_CAP_ID_VPD          Vital Product Data
 474 *  %PCI_CAP_ID_SLOTID       Slot Identification
 475 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 476 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 477 *  %PCI_CAP_ID_PCIX         PCI-X
 478 *  %PCI_CAP_ID_EXP          PCI Express
 479 */
 480u8 pci_find_capability(struct pci_dev *dev, int cap)
 481{
 482	u8 pos;
 483
 484	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 485	if (pos)
 486		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 487
 488	return pos;
 489}
 490EXPORT_SYMBOL(pci_find_capability);
 491
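/*
 * Example: locating the Power Management capability and reading its
 * control/status register.  Illustrative sketch; no error handling.
 *
 *	u8 pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	if (pm) {
 *		u16 pmcsr;
 *
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 *	}
 */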
 492/**
 493 * pci_bus_find_capability - query for devices' capabilities
 494 * @bus: the PCI bus to query
 495 * @devfn: PCI device to query
 496 * @cap: capability code
 497 *
 498 * Like pci_find_capability() but works for PCI devices that do not have a
 499 * pci_dev structure set up yet.
 500 *
 501 * Returns the address of the requested capability structure within the
 502 * device's PCI configuration space or 0 in case the device does not
 503 * support it.
 504 */
 505u8 pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 506{
 507	u8 hdr_type, pos;
 508
 509	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 510
 511	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 512	if (pos)
 513		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 514
 515	return pos;
 516}
 517EXPORT_SYMBOL(pci_bus_find_capability);
 518
 519/**
 520 * pci_find_next_ext_capability - Find an extended capability
 521 * @dev: PCI device to query
 522 * @start: address at which to start looking (0 to start at beginning of list)
 523 * @cap: capability code
 524 *
 525 * Returns the address of the next matching extended capability structure
 526 * within the device's PCI configuration space or 0 if the device does
 527 * not support it.  Some capabilities can occur several times, e.g., the
 528 * vendor-specific capability, and this provides a way to find them all.
 529 */
 530u16 pci_find_next_ext_capability(struct pci_dev *dev, u16 start, int cap)
 531{
 532	u32 header;
 533	int ttl;
 534	u16 pos = PCI_CFG_SPACE_SIZE;
 535
 536	/* minimum 8 bytes per capability */
 537	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 538
 539	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 540		return 0;
 541
 542	if (start)
 543		pos = start;
 544
 545	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 546		return 0;
 547
 548	/*
 549	 * If we have no capabilities, this is indicated by cap ID,
 550	 * cap version and next pointer all being 0.
 551	 */
 552	if (header == 0)
 553		return 0;
 554
 555	while (ttl-- > 0) {
 556		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
 557			return pos;
 558
 559		pos = PCI_EXT_CAP_NEXT(header);
 560		if (pos < PCI_CFG_SPACE_SIZE)
 561			break;
 562
 563		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 564			break;
 565	}
 566
 567	return 0;
 568}
 569EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
 570
 571/**
 572 * pci_find_ext_capability - Find an extended capability
 573 * @dev: PCI device to query
 574 * @cap: capability code
 575 *
 576 * Returns the address of the requested extended capability structure
 577 * within the device's PCI configuration space or 0 if the device does
 578 * not support it.  Possible values for @cap include:
 579 *
 580 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 581 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 582 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 583 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 584 */
 585u16 pci_find_ext_capability(struct pci_dev *dev, int cap)
 586{
 587	return pci_find_next_ext_capability(dev, 0, cap);
 588}
 589EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 590
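/*
 * Example: checking whether a device implements AER and reading its
 * uncorrectable error status.  Illustrative sketch only.
 *
 *	u16 aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	if (aer) {
 *		u32 uncor;
 *
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &uncor);
 *	}
 */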
 591/**
 592 * pci_get_dsn - Read and return the 8-byte Device Serial Number
 593 * @dev: PCI device to query
 594 *
 595 * Looks up the PCI_EXT_CAP_ID_DSN and reads the 8 bytes of the Device Serial
 596 * Number.
 597 *
 598 * Returns the DSN, or zero if the capability does not exist.
 599 */
 600u64 pci_get_dsn(struct pci_dev *dev)
 601{
 602	u32 dword;
 603	u64 dsn;
 604	int pos;
 605
 606	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_DSN);
 607	if (!pos)
 608		return 0;
 609
 610	/*
 611	 * The Device Serial Number is two dwords offset 4 bytes from the
 612	 * capability position. The specification says that the first dword is
 613	 * the lower half, and the second dword is the upper half.
 614	 */
 615	pos += 4;
 616	pci_read_config_dword(dev, pos, &dword);
 617	dsn = (u64)dword;
 618	pci_read_config_dword(dev, pos + 4, &dword);
 619	dsn |= ((u64)dword) << 32;
 620
 621	return dsn;
 622}
 623EXPORT_SYMBOL_GPL(pci_get_dsn);
 624
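/*
 * Example: using the DSN as a stable identifier, e.g. for correlating a
 * device across hotplug events.  Illustrative sketch only.
 *
 *	u64 dsn = pci_get_dsn(pdev);
 *	if (dsn)
 *		pci_info(pdev, "serial number %016llx\n", dsn);
 */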
 625static u8 __pci_find_next_ht_cap(struct pci_dev *dev, u8 pos, int ht_cap)
 626{
 627	int rc, ttl = PCI_FIND_CAP_TTL;
 628	u8 cap, mask;
 629
 630	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 631		mask = HT_3BIT_CAP_MASK;
 632	else
 633		mask = HT_5BIT_CAP_MASK;
 634
 635	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 636				      PCI_CAP_ID_HT, &ttl);
 637	while (pos) {
 638		rc = pci_read_config_byte(dev, pos + 3, &cap);
 639		if (rc != PCIBIOS_SUCCESSFUL)
 640			return 0;
 641
 642		if ((cap & mask) == ht_cap)
 643			return pos;
 644
 645		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 646					      pos + PCI_CAP_LIST_NEXT,
 647					      PCI_CAP_ID_HT, &ttl);
 648	}
 649
 650	return 0;
 651}
 652
 653/**
 654 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 655 * @dev: PCI device to query
 656 * @pos: Position from which to continue searching
 657 * @ht_cap: HyperTransport capability code
 658 *
 659 * To be used in conjunction with pci_find_ht_capability() to search for
 660 * all capabilities matching @ht_cap. @pos should always be a value returned
 661 * from pci_find_ht_capability().
 662 *
 663 * NB. To be 100% safe against broken PCI devices, the caller should take
 664 * steps to avoid an infinite loop.
 665 */
 666u8 pci_find_next_ht_capability(struct pci_dev *dev, u8 pos, int ht_cap)
 667{
 668	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 669}
 670EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 671
 672/**
 673 * pci_find_ht_capability - query a device's HyperTransport capabilities
 674 * @dev: PCI device to query
 675 * @ht_cap: HyperTransport capability code
 676 *
 677 * Tell if a device supports a given HyperTransport capability.
 678 * Returns an address within the device's PCI configuration space
 679 * or 0 in case the device does not support the requested capability.
 680 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 681 * which has a HyperTransport capability matching @ht_cap.
 682 */
 683u8 pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 684{
 685	u8 pos;
 686
 687	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 688	if (pos)
 689		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 690
 691	return pos;
 692}
 693EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 694
 695/**
 696 * pci_find_vsec_capability - Find a vendor-specific extended capability
 697 * @dev: PCI device to query
 698 * @vendor: Vendor ID for which capability is defined
 699 * @cap: Vendor-specific capability ID
 700 *
 701 * If @dev has Vendor ID @vendor, search for a VSEC capability with
 702 * VSEC ID @cap. If found, return the capability offset in
 703 * config space; otherwise return 0.
 704 */
 705u16 pci_find_vsec_capability(struct pci_dev *dev, u16 vendor, int cap)
 706{
 707	u16 vsec = 0;
 708	u32 header;
 709
 710	if (vendor != dev->vendor)
 711		return 0;
 712
 713	while ((vsec = pci_find_next_ext_capability(dev, vsec,
 714						     PCI_EXT_CAP_ID_VNDR))) {
 715		if (pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER,
 716					  &header) == PCIBIOS_SUCCESSFUL &&
 717		    PCI_VNDR_HEADER_ID(header) == cap)
 718			return vsec;
 719	}
 720
 721	return 0;
 722}
 723EXPORT_SYMBOL_GPL(pci_find_vsec_capability);
 724
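/*
 * Example: a vendor driver looking up its own VSEC.  Illustrative
 * sketch; the VSEC ID 0x41 here is a made-up value.
 *
 *	u16 vsec = pci_find_vsec_capability(pdev, PCI_VENDOR_ID_INTEL, 0x41);
 *	if (vsec) {
 *		u32 hdr;
 *
 *		pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &hdr);
 *	}
 */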
 725/**
 726 * pci_find_parent_resource - return resource region of parent bus of given
 727 *			      region
 728 * @dev: PCI device structure contains resources to be searched
 729 * @res: child resource record for which parent is sought
 730 *
 731 * For given resource region of given device, return the resource region of
 732 * parent bus the given region is contained in.
 733 */
 734struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 735					  struct resource *res)
 736{
 737	const struct pci_bus *bus = dev->bus;
 738	struct resource *r;
 739	int i;
 740
 741	pci_bus_for_each_resource(bus, r, i) {
 742		if (!r)
 743			continue;
 744		if (resource_contains(r, res)) {
 745
 746			/*
 747			 * If the window is prefetchable but the BAR is
 748			 * not, the allocator made a mistake.
 749			 */
 750			if (r->flags & IORESOURCE_PREFETCH &&
 751			    !(res->flags & IORESOURCE_PREFETCH))
 752				return NULL;
 753
 754			/*
 755			 * If we're below a transparent bridge, there may
 756			 * be both a positively-decoded aperture and a
 757			 * subtractively-decoded region that contain the BAR.
 758			 * We want the positively-decoded one, so this depends
 759			 * on pci_bus_for_each_resource() giving us those
 760			 * first.
 761			 */
 762			return r;
 763		}
 764	}
 765	return NULL;
 766}
 767EXPORT_SYMBOL(pci_find_parent_resource);
 768
 769/**
 770 * pci_find_resource - Return matching PCI device resource
 771 * @dev: PCI device to query
 772 * @res: Resource to look for
 773 *
 774 * Goes over standard PCI resources (BARs) and checks if the given resource
 775 * is partially or fully contained in any of them. In that case the
 776 * matching resource is returned, %NULL otherwise.
 777 */
 778struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
 779{
 780	int i;
 781
 782	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
 783		struct resource *r = &dev->resource[i];
 784
 785		if (r->start && resource_contains(r, res))
 786			return r;
 787	}
 788
 789	return NULL;
 790}
 791EXPORT_SYMBOL(pci_find_resource);
 792
 793/**
 794 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 795 * @dev: the PCI device to operate on
 796 * @pos: config space offset of status word
 797 * @mask: mask of bit(s) to care about in status word
 798 *
 799 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 800 */
 801int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
 802{
 803	int i;
 804
 805	/* Wait for the Transaction Pending bit to clear */
 806	for (i = 0; i < 4; i++) {
 807		u16 status;
 808		if (i)
 809			msleep((1 << (i - 1)) * 100);
 810
 811		pci_read_config_word(dev, pos, &status);
 812		if (!(status & mask))
 813			return 1;
 814	}
 815
 816	return 0;
 817}
 818
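/*
 * Example: waiting for the PCIe Transaction Pending bit to clear before
 * a function-level reset.  Illustrative sketch only.
 *
 *	int cap = pci_pcie_cap(pdev);
 *
 *	if (cap && !pci_wait_for_pending(pdev, cap + PCI_EXP_DEVSTA,
 *					 PCI_EXP_DEVSTA_TRPND))
 *		pci_warn(pdev, "transactions still pending\n");
 */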
 819static int pci_acs_enable;
 820
 821/**
 822 * pci_request_acs - ask for ACS to be enabled if supported
 823 */
 824void pci_request_acs(void)
 825{
 826	pci_acs_enable = 1;
 827}
 828
 829static const char *disable_acs_redir_param;
 830
 831/**
 832 * pci_disable_acs_redir - disable ACS redirect capabilities
 833 * @dev: the PCI device
 834 *
 835 * For only devices specified in the disable_acs_redir parameter.
 836 */
 837static void pci_disable_acs_redir(struct pci_dev *dev)
 838{
 839	int ret = 0;
 840	const char *p;
 841	int pos;
 842	u16 ctrl;
 843
 844	if (!disable_acs_redir_param)
 845		return;
 846
 847	p = disable_acs_redir_param;
 848	while (*p) {
 849		ret = pci_dev_str_match(dev, p, &p);
 850		if (ret < 0) {
 851			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
 852				     disable_acs_redir_param);
 853
 854			break;
 855		} else if (ret == 1) {
 856			/* Found a match */
 857			break;
 858		}
 859
 860		if (*p != ';' && *p != ',') {
 861			/* End of param or invalid format */
 862			break;
 863		}
 864		p++;
 865	}
 866
 867	if (ret != 1)
 868		return;
 869
 870	if (!pci_dev_specific_disable_acs_redir(dev))
 871		return;
 872
 873	pos = dev->acs_cap;
 874	if (!pos) {
 875		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
 876		return;
 877	}
 878
 879	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
 880
 881	/* P2P Request & Completion Redirect */
 882	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
 883
 884	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
 885
 886	pci_info(dev, "disabled ACS redirect\n");
 887}
 888
 889/**
 890 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
 891 * @dev: the PCI device
 892 */
 893static void pci_std_enable_acs(struct pci_dev *dev)
 894{
 895	int pos;
 896	u16 cap;
 897	u16 ctrl;
 898
 899	pos = dev->acs_cap;
 900	if (!pos)
 901		return;
 902
 903	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
 904	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
 905
 906	/* Source Validation */
 907	ctrl |= (cap & PCI_ACS_SV);
 908
 909	/* P2P Request Redirect */
 910	ctrl |= (cap & PCI_ACS_RR);
 911
 912	/* P2P Completion Redirect */
 913	ctrl |= (cap & PCI_ACS_CR);
 914
 915	/* Upstream Forwarding */
 916	ctrl |= (cap & PCI_ACS_UF);
 917
 918	/* Enable Translation Blocking for external devices */
 919	if (dev->external_facing || dev->untrusted)
 920		ctrl |= (cap & PCI_ACS_TB);
 921
 922	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
 923}
 924
 925/**
 926 * pci_enable_acs - enable ACS if hardware support it
 927 * @dev: the PCI device
 928 */
 929static void pci_enable_acs(struct pci_dev *dev)
 930{
 931	if (!pci_acs_enable)
 932		goto disable_acs_redir;
 933
 934	if (!pci_dev_specific_enable_acs(dev))
 935		goto disable_acs_redir;
 936
 937	pci_std_enable_acs(dev);
 938
 939disable_acs_redir:
 940	/*
 941	 * Note: pci_disable_acs_redir() must be called even if ACS was not
 942	 * enabled by the kernel because it may have been enabled by
 943	 * platform firmware.  So if we are told to disable it, we should
 944	 * always disable it after setting the kernel's default
 945	 * preferences.
 946	 */
 947	pci_disable_acs_redir(dev);
 948}
 949
 950/**
 951 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 952 * @dev: PCI device to have its BARs restored
 953 *
 954 * Restore the BAR values for a given device, so as to make it
 955 * accessible by its driver.
 956 */
 957static void pci_restore_bars(struct pci_dev *dev)
 958{
 959	int i;
 960
 961	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 962		pci_update_resource(dev, i);
 963}
 964
 965static const struct pci_platform_pm_ops *pci_platform_pm;
 966
 967int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
 968{
 969	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
 970	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
 971		return -EINVAL;
 972	pci_platform_pm = ops;
 973	return 0;
 974}
 975
 976static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 977{
 978	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 979}
 980
 981static inline int platform_pci_set_power_state(struct pci_dev *dev,
 982					       pci_power_t t)
 983{
 984	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 985}
 986
 987static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
 988{
 989	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
 990}
 991
 992static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
 993{
 994	if (pci_platform_pm && pci_platform_pm->refresh_state)
 995		pci_platform_pm->refresh_state(dev);
 996}
 997
 998static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 999{
1000	return pci_platform_pm ?
1001			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
1002}
1003
1004static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
1005{
1006	return pci_platform_pm ?
1007			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
1008}
1009
1010static inline bool platform_pci_need_resume(struct pci_dev *dev)
1011{
1012	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
1013}
1014
1015static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
1016{
1017	if (pci_platform_pm && pci_platform_pm->bridge_d3)
1018		return pci_platform_pm->bridge_d3(dev);
1019	return false;
1020}
1021
1022/**
1023 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
1024 *			     given PCI device
1025 * @dev: PCI device to handle.
1026 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1027 *
1028 * RETURN VALUE:
1029 * -EINVAL if the requested state is invalid.
1030 * -EIO if device does not support PCI PM or its PM capabilities register has a
1031 * wrong version, or device doesn't support the requested state.
1032 * 0 if device already is in the requested state.
1033 * 0 if device's power state has been successfully changed.
1034 */
1035static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
1036{
1037	u16 pmcsr;
1038	bool need_restore = false;
1039
1040	/* Check if we're already there */
1041	if (dev->current_state == state)
1042		return 0;
1043
1044	if (!dev->pm_cap)
1045		return -EIO;
1046
1047	if (state < PCI_D0 || state > PCI_D3hot)
1048		return -EINVAL;
1049
1050	/*
1051	 * Validate transition: We can enter D0 from any state, but if
1052	 * we're already in a low-power state, we can only go deeper.  E.g.,
1053	 * we can go from D1 to D3, but we can't go directly from D3 to D1;
1054	 * we'd have to go from D3 to D0, then to D1.
1055	 */
1056	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
1057	    && dev->current_state > state) {
1058		pci_err(dev, "invalid power transition (from %s to %s)\n",
1059			pci_power_name(dev->current_state),
1060			pci_power_name(state));
1061		return -EINVAL;
1062	}
1063
1064	/* Check if this device supports the desired state */
1065	if ((state == PCI_D1 && !dev->d1_support)
1066	   || (state == PCI_D2 && !dev->d2_support))
1067		return -EIO;
1068
1069	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1070	if (pmcsr == (u16) ~0) {
1071		pci_err(dev, "can't change power state from %s to %s (config space inaccessible)\n",
1072			pci_power_name(dev->current_state),
1073			pci_power_name(state));
1074		return -EIO;
1075	}
1076
1077	/*
1078	 * If we're (effectively) in D3, force entire word to 0.
1079	 * This doesn't affect PME_Status, disables PME_En, and
1080	 * sets PowerState to 0.
1081	 */
1082	switch (dev->current_state) {
1083	case PCI_D0:
1084	case PCI_D1:
1085	case PCI_D2:
1086		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1087		pmcsr |= state;
1088		break;
1089	case PCI_D3hot:
1090	case PCI_D3cold:
1091	case PCI_UNKNOWN: /* Boot-up */
1092		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
1093		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
1094			need_restore = true;
1095		fallthrough;	/* force to D0 */
1096	default:
1097		pmcsr = 0;
1098		break;
1099	}
1100
1101	/* Enter specified state */
1102	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1103
1104	/*
1105	 * Mandatory power management transition delays; see PCI PM 1.1
1106	 * 5.6.1 table 18
1107	 */
1108	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
1109		pci_dev_d3_sleep(dev);
1110	else if (state == PCI_D2 || dev->current_state == PCI_D2)
1111		udelay(PCI_PM_D2_DELAY);
1112
1113	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1114	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1115	if (dev->current_state != state)
1116		pci_info_ratelimited(dev, "refused to change power state from %s to %s\n",
1117			 pci_power_name(dev->current_state),
1118			 pci_power_name(state));
1119
1120	/*
1121	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
1122	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
1123	 * from D3hot to D0 _may_ perform an internal reset, thereby
1124	 * going to "D0 Uninitialized" rather than "D0 Initialized".
1125	 * For example, at least some versions of the 3c905B and the
1126	 * 3c556B exhibit this behaviour.
1127	 *
1128	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
1129	 * devices in a D3hot state at boot.  Consequently, we need to
1130	 * restore at least the BARs so that the device will be
1131	 * accessible to its driver.
1132	 */
1133	if (need_restore)
1134		pci_restore_bars(dev);
1135
1136	if (dev->bus->self)
1137		pcie_aspm_pm_state_change(dev->bus->self);
1138
1139	return 0;
1140}
1141
1142/**
1143 * pci_update_current_state - Read power state of given device and cache it
1144 * @dev: PCI device to handle.
1145 * @state: State to cache in case the device doesn't have the PM capability
1146 *
1147 * The power state is read from the PMCSR register, which however is
1148 * inaccessible in D3cold.  The platform firmware is therefore queried first
1149 * to detect accessibility of the register.  In case the platform firmware
1150 * reports an incorrect state or the device isn't power manageable by the
1151 * platform at all, we try to detect D3cold by testing accessibility of the
1152 * vendor ID in config space.
1153 */
1154void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
1155{
1156	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
1157	    !pci_device_is_present(dev)) {
1158		dev->current_state = PCI_D3cold;
1159	} else if (dev->pm_cap) {
1160		u16 pmcsr;
1161
1162		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1163		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1164	} else {
1165		dev->current_state = state;
1166	}
1167}
1168
1169/**
1170 * pci_refresh_power_state - Refresh the given device's power state data
1171 * @dev: Target PCI device.
1172 *
1173 * Ask the platform to refresh the devices power state information and invoke
1174 * pci_update_current_state() to update its current PCI power state.
1175 */
1176void pci_refresh_power_state(struct pci_dev *dev)
1177{
1178	if (platform_pci_power_manageable(dev))
1179		platform_pci_refresh_power_state(dev);
1180
1181	pci_update_current_state(dev, dev->current_state);
1182}
1183
1184/**
1185 * pci_platform_power_transition - Use platform to change device power state
1186 * @dev: PCI device to handle.
1187 * @state: State to put the device into.
1188 */
1189int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
1190{
1191	int error;
1192
1193	if (platform_pci_power_manageable(dev)) {
1194		error = platform_pci_set_power_state(dev, state);
1195		if (!error)
1196			pci_update_current_state(dev, state);
1197	} else
1198		error = -ENODEV;
1199
1200	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
1201		dev->current_state = PCI_D0;
1202
1203	return error;
1204}
1205EXPORT_SYMBOL_GPL(pci_platform_power_transition);
1206
1207static int pci_resume_one(struct pci_dev *pci_dev, void *ign)
1208{
1209	pm_request_resume(&pci_dev->dev);
1210	return 0;
1211}
1212
1213/**
1214 * pci_resume_bus - Walk given bus and runtime resume devices on it
1215 * @bus: Top bus of the subtree to walk.
1216 */
1217void pci_resume_bus(struct pci_bus *bus)
1218{
1219	if (bus)
1220		pci_walk_bus(bus, pci_resume_one, NULL);
1221}
1222
1223static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
1224{
1225	int delay = 1;
1226	u32 id;
1227
1228	/*
1229	 * After reset, the device should not silently discard config
1230	 * requests, but it may still indicate that it needs more time by
1231	 * responding to them with CRS completions.  The Root Port will
1232	 * generally synthesize ~0 data to complete the read (except when
1233	 * CRS SV is enabled and the read was for the Vendor ID; in that
1234	 * case it synthesizes 0x0001 data).
1235	 *
1236	 * Wait for the device to return a non-CRS completion.  Read the
1237	 * Command register instead of Vendor ID so we don't have to
1238	 * contend with the CRS SV value.
1239	 */
1240	pci_read_config_dword(dev, PCI_COMMAND, &id);
1241	while (id == ~0) {
1242		if (delay > timeout) {
1243			pci_warn(dev, "not ready %dms after %s; giving up\n",
1244				 delay - 1, reset_type);
1245			return -ENOTTY;
1246		}
1247
1248		if (delay > 1000)
1249			pci_info(dev, "not ready %dms after %s; waiting\n",
1250				 delay - 1, reset_type);
1251
1252		msleep(delay);
1253		delay *= 2;
1254		pci_read_config_dword(dev, PCI_COMMAND, &id);
1255	}
1256
1257	if (delay > 1000)
1258		pci_info(dev, "ready %dms after %s\n", delay - 1,
1259			 reset_type);
1260
1261	return 0;
1262}
1263
1264/**
1265 * pci_power_up - Put the given device into D0
1266 * @dev: PCI device to power up
1267 */
1268int pci_power_up(struct pci_dev *dev)
1269{
1270	pci_platform_power_transition(dev, PCI_D0);
1271
1272	/*
1273	 * Mandatory power management transition delays are handled in
1274	 * pci_pm_resume_noirq() and pci_pm_runtime_resume() of the
1275	 * corresponding bridge.
1276	 */
1277	if (dev->runtime_d3cold) {
1278		/*
1279		 * When powering on a bridge from D3cold, the whole hierarchy
 1280		 * may be powered on into D0uninitialized state.  Resume it here
 1281		 * to give the devices a chance to suspend again.
1282		 */
1283		pci_resume_bus(dev->subordinate);
1284	}
1285
1286	return pci_raw_set_power_state(dev, PCI_D0);
1287}
1288
1289/**
1290 * __pci_dev_set_current_state - Set current state of a PCI device
1291 * @dev: Device to handle
1292 * @data: pointer to state to be set
1293 */
1294static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
1295{
1296	pci_power_t state = *(pci_power_t *)data;
1297
1298	dev->current_state = state;
1299	return 0;
1300}
1301
1302/**
1303 * pci_bus_set_current_state - Walk given bus and set current state of devices
1304 * @bus: Top bus of the subtree to walk.
1305 * @state: state to be set
1306 */
1307void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
1308{
1309	if (bus)
1310		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
1311}
1312
1313/**
1314 * pci_set_power_state - Set the power state of a PCI device
1315 * @dev: PCI device to handle.
1316 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
1317 *
1318 * Transition a device to a new power state, using the platform firmware and/or
1319 * the device's PCI PM registers.
1320 *
1321 * RETURN VALUE:
1322 * -EINVAL if the requested state is invalid.
1323 * -EIO if device does not support PCI PM or its PM capabilities register has a
1324 * wrong version, or device doesn't support the requested state.
1325 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
1326 * 0 if device already is in the requested state.
1327 * 0 if the transition is to D3 but D3 is not supported.
1328 * 0 if device's power state has been successfully changed.
1329 */
1330int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
1331{
1332	int error;
1333
1334	/* Bound the state we're entering */
1335	if (state > PCI_D3cold)
1336		state = PCI_D3cold;
1337	else if (state < PCI_D0)
1338		state = PCI_D0;
1339	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
1340
1341		/*
1342		 * If the device or the parent bridge do not support PCI
1343		 * PM, ignore the request if we're doing anything other
1344		 * than putting it into D0 (which would only happen on
1345		 * boot).
1346		 */
1347		return 0;
1348
1349	/* Check if we're already there */
1350	if (dev->current_state == state)
1351		return 0;
1352
1353	if (state == PCI_D0)
1354		return pci_power_up(dev);
1355
1356	/*
1357	 * This device is quirked not to be put into D3, so don't put it in
1358	 * D3
1359	 */
1360	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
1361		return 0;
1362
1363	/*
1364	 * To put device in D3cold, we put device into D3hot in native
1365	 * way, then put device into D3cold with platform ops
1366	 */
1367	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
1368					PCI_D3hot : state);
1369
1370	if (pci_platform_power_transition(dev, state))
1371		return error;
1372
1373	/* Powering off a bridge may power off the whole hierarchy */
1374	if (state == PCI_D3cold)
1375		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
1376
1377	return 0;
1378}
1379EXPORT_SYMBOL(pci_set_power_state);
1380
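/*
 * Example: a legacy driver's suspend/resume pair using this interface.
 * Illustrative sketch; modern drivers rely on dev_pm_ops and the PCI
 * core doing this on their behalf.
 *
 *	suspend:
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, PCI_D3hot);
 *	resume:
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 */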
1381/**
1382 * pci_choose_state - Choose the power state of a PCI device
1383 * @dev: PCI device to be suspended
1384 * @state: target sleep state for the whole system. This is the value
1385 *	   that is passed to suspend() function.
1386 *
1387 * Returns PCI power state suitable for given device and given system
1388 * message.
1389 */
1390pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
1391{
1392	pci_power_t ret;
1393
1394	if (!dev->pm_cap)
1395		return PCI_D0;
1396
1397	ret = platform_pci_choose_state(dev);
1398	if (ret != PCI_POWER_ERROR)
1399		return ret;
1400
1401	switch (state.event) {
1402	case PM_EVENT_ON:
1403		return PCI_D0;
1404	case PM_EVENT_FREEZE:
1405	case PM_EVENT_PRETHAW:
1406		/* REVISIT both freeze and pre-thaw "should" use D0 */
1407	case PM_EVENT_SUSPEND:
1408	case PM_EVENT_HIBERNATE:
1409		return PCI_D3hot;
1410	default:
1411		pci_info(dev, "unrecognized suspend event %d\n",
1412			 state.event);
1413		BUG();
1414	}
1415	return PCI_D0;
1416}
1417EXPORT_SYMBOL(pci_choose_state);
1418
1419#define PCI_EXP_SAVE_REGS	7
1420
1421static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
1422						       u16 cap, bool extended)
1423{
1424	struct pci_cap_saved_state *tmp;
1425
1426	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
1427		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
1428			return tmp;
1429	}
1430	return NULL;
1431}
1432
1433struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1434{
1435	return _pci_find_saved_cap(dev, cap, false);
1436}
1437
1438struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1439{
1440	return _pci_find_saved_cap(dev, cap, true);
1441}
1442
1443static int pci_save_pcie_state(struct pci_dev *dev)
1444{
1445	int i = 0;
1446	struct pci_cap_saved_state *save_state;
1447	u16 *cap;
1448
1449	if (!pci_is_pcie(dev))
1450		return 0;
1451
1452	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1453	if (!save_state) {
1454		pci_err(dev, "buffer not found in %s\n", __func__);
1455		return -ENOMEM;
1456	}
1457
1458	cap = (u16 *)&save_state->cap.data[0];
1459	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1460	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1461	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1462	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1463	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1464	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1465	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1466
1467	return 0;
1468}
1469
1470static void pci_restore_pcie_state(struct pci_dev *dev)
1471{
1472	int i = 0;
1473	struct pci_cap_saved_state *save_state;
1474	u16 *cap;
1475
1476	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1477	if (!save_state)
1478		return;
1479
1480	cap = (u16 *)&save_state->cap.data[0];
1481	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1482	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1483	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1484	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1485	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1486	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1487	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1488}
1489
1490static int pci_save_pcix_state(struct pci_dev *dev)
1491{
1492	int pos;
1493	struct pci_cap_saved_state *save_state;
1494
1495	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1496	if (!pos)
1497		return 0;
1498
1499	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1500	if (!save_state) {
1501		pci_err(dev, "buffer not found in %s\n", __func__);
1502		return -ENOMEM;
1503	}
1504
1505	pci_read_config_word(dev, pos + PCI_X_CMD,
1506			     (u16 *)save_state->cap.data);
1507
1508	return 0;
1509}
1510
1511static void pci_restore_pcix_state(struct pci_dev *dev)
1512{
1513	int i = 0, pos;
1514	struct pci_cap_saved_state *save_state;
1515	u16 *cap;
1516
1517	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1518	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1519	if (!save_state || !pos)
1520		return;
1521	cap = (u16 *)&save_state->cap.data[0];
1522
1523	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1524}
1525
1526static void pci_save_ltr_state(struct pci_dev *dev)
1527{
1528	int ltr;
1529	struct pci_cap_saved_state *save_state;
1530	u16 *cap;
1531
1532	if (!pci_is_pcie(dev))
1533		return;
1534
1535	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1536	if (!ltr)
1537		return;
1538
1539	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1540	if (!save_state) {
1541		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
1542		return;
1543	}
1544
1545	cap = (u16 *)&save_state->cap.data[0];
1546	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
1547	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
1548}
1549
1550static void pci_restore_ltr_state(struct pci_dev *dev)
1551{
1552	struct pci_cap_saved_state *save_state;
1553	int ltr;
1554	u16 *cap;
1555
1556	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
1557	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
1558	if (!save_state || !ltr)
1559		return;
1560
1561	cap = (u16 *)&save_state->cap.data[0];
1562	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
1563	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
1564}
1565
1566/**
1567 * pci_save_state - save the PCI configuration space of a device before
1568 *		    suspending
1569 * @dev: PCI device that we're dealing with
1570 */
1571int pci_save_state(struct pci_dev *dev)
1572{
1573	int i;
1574	/* XXX: 100% dword access ok here? */
1575	for (i = 0; i < 16; i++) {
1576		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1577		pci_dbg(dev, "saving config space at offset %#x (reading %#x)\n",
1578			i * 4, dev->saved_config_space[i]);
1579	}
1580	dev->state_saved = true;
1581
1582	i = pci_save_pcie_state(dev);
1583	if (i != 0)
1584		return i;
1585
1586	i = pci_save_pcix_state(dev);
1587	if (i != 0)
1588		return i;
1589
1590	pci_save_ltr_state(dev);
1591	pci_save_dpc_state(dev);
1592	pci_save_aer_state(dev);
1593	pci_save_ptm_state(dev);
1594	return pci_save_vc_state(dev);
1595}
1596EXPORT_SYMBOL(pci_save_state);
1597
1598static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1599				     u32 saved_val, int retry, bool force)
1600{
1601	u32 val;
1602
1603	pci_read_config_dword(pdev, offset, &val);
1604	if (!force && val == saved_val)
1605		return;
1606
1607	for (;;) {
1608		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1609			offset, val, saved_val);
1610		pci_write_config_dword(pdev, offset, saved_val);
1611		if (retry-- <= 0)
1612			return;
1613
1614		pci_read_config_dword(pdev, offset, &val);
1615		if (val == saved_val)
1616			return;
1617
1618		mdelay(1);
1619	}
1620}
1621
1622static void pci_restore_config_space_range(struct pci_dev *pdev,
1623					   int start, int end, int retry,
1624					   bool force)
1625{
1626	int index;
1627
1628	for (index = end; index >= start; index--)
1629		pci_restore_config_dword(pdev, 4 * index,
1630					 pdev->saved_config_space[index],
1631					 retry, force);
1632}
1633
1634static void pci_restore_config_space(struct pci_dev *pdev)
1635{
1636	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1637		pci_restore_config_space_range(pdev, 10, 15, 0, false);
1638		/* Restore BARs before the command register. */
1639		pci_restore_config_space_range(pdev, 4, 9, 10, false);
1640		pci_restore_config_space_range(pdev, 0, 3, 0, false);
1641	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
1642		pci_restore_config_space_range(pdev, 12, 15, 0, false);
1643
1644		/*
1645		 * Force rewriting of prefetch registers to avoid S3 resume
1646		 * issues on Intel PCI bridges that occur when these
1647		 * registers are not explicitly written.
1648		 */
1649		pci_restore_config_space_range(pdev, 9, 11, 0, true);
1650		pci_restore_config_space_range(pdev, 0, 8, 0, false);
1651	} else {
1652		pci_restore_config_space_range(pdev, 0, 15, 0, false);
1653	}
1654}
1655
1656static void pci_restore_rebar_state(struct pci_dev *pdev)
1657{
1658	unsigned int pos, nbars, i;
1659	u32 ctrl;
1660
1661	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
1662	if (!pos)
1663		return;
1664
1665	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1666	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
1667		    PCI_REBAR_CTRL_NBAR_SHIFT;
1668
1669	for (i = 0; i < nbars; i++, pos += 8) {
1670		struct resource *res;
1671		int bar_idx, size;
1672
1673		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
1674		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
1675		res = pdev->resource + bar_idx;
1676		size = pci_rebar_bytes_to_size(resource_size(res));
1677		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
1678		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
1679		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
1680	}
1681}
1682
1683/**
1684 * pci_restore_state - Restore the saved state of a PCI device
1685 * @dev: PCI device that we're dealing with
1686 */
1687void pci_restore_state(struct pci_dev *dev)
1688{
1689	if (!dev->state_saved)
1690		return;
1691
1692	/*
1693	 * Restore max latencies (in the LTR capability) before enabling
1694	 * LTR itself (in the PCIe capability).
1695	 */
1696	pci_restore_ltr_state(dev);
1697
1698	pci_restore_pcie_state(dev);
1699	pci_restore_pasid_state(dev);
1700	pci_restore_pri_state(dev);
1701	pci_restore_ats_state(dev);
1702	pci_restore_vc_state(dev);
1703	pci_restore_rebar_state(dev);
1704	pci_restore_dpc_state(dev);
1705	pci_restore_ptm_state(dev);
1706
1707	pci_aer_clear_status(dev);
1708	pci_restore_aer_state(dev);
1709
1710	pci_restore_config_space(dev);
1711
1712	pci_restore_pcix_state(dev);
1713	pci_restore_msi_state(dev);
1714
1715	/* Restore ACS and IOV configuration state */
1716	pci_enable_acs(dev);
1717	pci_restore_iov_state(dev);
1718
1719	dev->state_saved = false;
1720}
1721EXPORT_SYMBOL(pci_restore_state);
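
/*
 * Example (an illustrative sketch): the state saved in struct pci_dev
 * is what survives a power transition, so a resume path typically
 * restores it right after returning to D0.  The "foo" name is
 * hypothetical.
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */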
1722
1723struct pci_saved_state {
1724	u32 config_space[16];
1725	struct pci_cap_saved_data cap[];
1726};
1727
1728/**
1729 * pci_store_saved_state - Allocate and return an opaque struct containing
1730 *			   the device saved state.
1731 * @dev: PCI device that we're dealing with
1732 *
1733 * Return NULL if no state or error.
1734 */
1735struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1736{
1737	struct pci_saved_state *state;
1738	struct pci_cap_saved_state *tmp;
1739	struct pci_cap_saved_data *cap;
1740	size_t size;
1741
1742	if (!dev->state_saved)
1743		return NULL;
1744
1745	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1746
1747	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1748		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1749
1750	state = kzalloc(size, GFP_KERNEL);
1751	if (!state)
1752		return NULL;
1753
1754	memcpy(state->config_space, dev->saved_config_space,
1755	       sizeof(state->config_space));
1756
1757	cap = state->cap;
1758	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1759		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1760		memcpy(cap, &tmp->cap, len);
1761		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1762	}
1763	/* Empty cap_save terminates list */
1764
1765	return state;
1766}
1767EXPORT_SYMBOL_GPL(pci_store_saved_state);
1768
1769/**
1770 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1771 * @dev: PCI device that we're dealing with
1772 * @state: Saved state returned from pci_store_saved_state()
1773 */
1774int pci_load_saved_state(struct pci_dev *dev,
1775			 struct pci_saved_state *state)
1776{
1777	struct pci_cap_saved_data *cap;
1778
1779	dev->state_saved = false;
1780
1781	if (!state)
1782		return 0;
1783
1784	memcpy(dev->saved_config_space, state->config_space,
1785	       sizeof(state->config_space));
1786
1787	cap = state->cap;
1788	while (cap->size) {
1789		struct pci_cap_saved_state *tmp;
1790
1791		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1792		if (!tmp || tmp->cap.size != cap->size)
1793			return -EINVAL;
1794
1795		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1796		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1797		       sizeof(struct pci_cap_saved_data) + cap->size);
1798	}
1799
1800	dev->state_saved = true;
1801	return 0;
1802}
1803EXPORT_SYMBOL_GPL(pci_load_saved_state);
1804
1805/**
1806 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1807 *				   and free the memory allocated for it.
1808 * @dev: PCI device that we're dealing with
1809 * @state: Pointer to saved state returned from pci_store_saved_state()
1810 */
1811int pci_load_and_free_saved_state(struct pci_dev *dev,
1812				  struct pci_saved_state **state)
1813{
1814	int ret = pci_load_saved_state(dev, *state);
1815	kfree(*state);
1816	*state = NULL;
1817	return ret;
1818}
1819EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
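
/*
 * Example (an illustrative sketch): pci_store_saved_state() and
 * pci_load_and_free_saved_state() let a caller keep a private copy of
 * the saved state across an operation that clobbers it, such as a
 * reset.  "foo_reset_hw" is a hypothetical helper.
 *
 *	struct pci_saved_state *saved;
 *
 *	pci_save_state(pdev);
 *	saved = pci_store_saved_state(pdev);
 *	foo_reset_hw(pdev);
 *	pci_load_and_free_saved_state(pdev, &saved);
 *	pci_restore_state(pdev);
 */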
1820
1821int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1822{
1823	return pci_enable_resources(dev, bars);
1824}
1825
1826static int do_pci_enable_device(struct pci_dev *dev, int bars)
1827{
1828	int err;
1829	struct pci_dev *bridge;
1830	u16 cmd;
1831	u8 pin;
1832
1833	err = pci_set_power_state(dev, PCI_D0);
1834	if (err < 0 && err != -EIO)
1835		return err;
1836
1837	bridge = pci_upstream_bridge(dev);
1838	if (bridge)
1839		pcie_aspm_powersave_config_link(bridge);
1840
1841	err = pcibios_enable_device(dev, bars);
1842	if (err < 0)
1843		return err;
1844	pci_fixup_device(pci_fixup_enable, dev);
1845
1846	if (dev->msi_enabled || dev->msix_enabled)
1847		return 0;
1848
1849	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1850	if (pin) {
1851		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1852		if (cmd & PCI_COMMAND_INTX_DISABLE)
1853			pci_write_config_word(dev, PCI_COMMAND,
1854					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1855	}
1856
1857	return 0;
1858}
1859
1860/**
1861 * pci_reenable_device - Resume abandoned device
1862 * @dev: PCI device to be resumed
1863 *
1864 * NOTE: This function is a backend of pci_default_resume() and is not supposed
1865 * to be called by normal code; write a proper resume handler and use it instead.
1866 */
1867int pci_reenable_device(struct pci_dev *dev)
1868{
1869	if (pci_is_enabled(dev))
1870		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1871	return 0;
1872}
1873EXPORT_SYMBOL(pci_reenable_device);
1874
1875static void pci_enable_bridge(struct pci_dev *dev)
1876{
1877	struct pci_dev *bridge;
1878	int retval;
1879
1880	bridge = pci_upstream_bridge(dev);
1881	if (bridge)
1882		pci_enable_bridge(bridge);
1883
1884	if (pci_is_enabled(dev)) {
1885		if (!dev->is_busmaster)
1886			pci_set_master(dev);
1887		return;
1888	}
1889
1890	retval = pci_enable_device(dev);
1891	if (retval)
1892		pci_err(dev, "Error enabling bridge (%d), continuing\n",
1893			retval);
1894	pci_set_master(dev);
1895}
1896
1897static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1898{
1899	struct pci_dev *bridge;
1900	int err;
1901	int i, bars = 0;
1902
1903	/*
1904	 * Power state could be unknown at this point, either due to a fresh
1905	 * boot or a device removal call.  So get the current power state
1906	 * so that things like MSI message writing will behave as expected
1907	 * (e.g. if the device really is in D0 at enable time).
1908	 */
1909	pci_update_current_state(dev, dev->current_state);
1910
1911	if (atomic_inc_return(&dev->enable_cnt) > 1)
1912		return 0;		/* already enabled */
1913
1914	bridge = pci_upstream_bridge(dev);
1915	if (bridge)
1916		pci_enable_bridge(bridge);
1917
1918	/* Only skip the SR-IOV resources; enable all other BARs */
1919	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1920		if (dev->resource[i].flags & flags)
1921			bars |= (1 << i);
1922	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1923		if (dev->resource[i].flags & flags)
1924			bars |= (1 << i);
1925
1926	err = do_pci_enable_device(dev, bars);
1927	if (err < 0)
1928		atomic_dec(&dev->enable_cnt);
1929	return err;
1930}
1931
1932/**
1933 * pci_enable_device_io - Initialize a device for use with IO space
1934 * @dev: PCI device to be initialized
1935 *
1936 * Initialize device before it's used by a driver. Ask low-level code
1937 * to enable I/O resources. Wake up the device if it was suspended.
1938 * Beware, this function can fail.
1939 */
1940int pci_enable_device_io(struct pci_dev *dev)
1941{
1942	return pci_enable_device_flags(dev, IORESOURCE_IO);
1943}
1944EXPORT_SYMBOL(pci_enable_device_io);
1945
1946/**
1947 * pci_enable_device_mem - Initialize a device for use with Memory space
1948 * @dev: PCI device to be initialized
1949 *
1950 * Initialize device before it's used by a driver. Ask low-level code
1951 * to enable Memory resources. Wake up the device if it was suspended.
1952 * Beware, this function can fail.
1953 */
1954int pci_enable_device_mem(struct pci_dev *dev)
1955{
1956	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1957}
1958EXPORT_SYMBOL(pci_enable_device_mem);
1959
1960/**
1961 * pci_enable_device - Initialize device before it's used by a driver.
1962 * @dev: PCI device to be initialized
1963 *
1964 * Initialize device before it's used by a driver. Ask low-level code
1965 * to enable I/O and memory. Wake up the device if it was suspended.
1966 * Beware, this function can fail.
1967 *
1968 * Note we don't actually enable the device many times if we call
1969 * this function repeatedly (we just increment the count).
1970 */
1971int pci_enable_device(struct pci_dev *dev)
1972{
1973	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1974}
1975EXPORT_SYMBOL(pci_enable_device);
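
/*
 * Example (an illustrative sketch): enable counts nest, so a driver
 * pairs pci_enable_device() in probe with pci_disable_device() in
 * remove.  The "foo" names are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 *
 *	static void foo_remove(struct pci_dev *pdev)
 *	{
 *		pci_disable_device(pdev);
 *	}
 */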
1976
1977/*
1978 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
1979 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
1980 * there's no need to track it separately.  pci_devres is initialized
1981 * when a device is enabled using managed PCI device enable interface.
1982 */
1983struct pci_devres {
1984	unsigned int enabled:1;
1985	unsigned int pinned:1;
1986	unsigned int orig_intx:1;
1987	unsigned int restore_intx:1;
1988	unsigned int mwi:1;
1989	u32 region_mask;
1990};
1991
1992static void pcim_release(struct device *gendev, void *res)
1993{
1994	struct pci_dev *dev = to_pci_dev(gendev);
1995	struct pci_devres *this = res;
1996	int i;
1997
1998	if (dev->msi_enabled)
1999		pci_disable_msi(dev);
2000	if (dev->msix_enabled)
2001		pci_disable_msix(dev);
2002
2003	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
2004		if (this->region_mask & (1 << i))
2005			pci_release_region(dev, i);
2006
2007	if (this->mwi)
2008		pci_clear_mwi(dev);
2009
2010	if (this->restore_intx)
2011		pci_intx(dev, this->orig_intx);
2012
2013	if (this->enabled && !this->pinned)
2014		pci_disable_device(dev);
2015}
2016
2017static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
2018{
2019	struct pci_devres *dr, *new_dr;
2020
2021	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
2022	if (dr)
2023		return dr;
2024
2025	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
2026	if (!new_dr)
2027		return NULL;
2028	return devres_get(&pdev->dev, new_dr, NULL, NULL);
2029}
2030
2031static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
2032{
2033	if (pci_is_managed(pdev))
2034		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
2035	return NULL;
2036}
2037
2038/**
2039 * pcim_enable_device - Managed pci_enable_device()
2040 * @pdev: PCI device to be initialized
2041 *
2042 * Managed pci_enable_device().
2043 */
2044int pcim_enable_device(struct pci_dev *pdev)
2045{
2046	struct pci_devres *dr;
2047	int rc;
2048
2049	dr = get_pci_dr(pdev);
2050	if (unlikely(!dr))
2051		return -ENOMEM;
2052	if (dr->enabled)
2053		return 0;
2054
2055	rc = pci_enable_device(pdev);
2056	if (!rc) {
2057		pdev->is_managed = 1;
2058		dr->enabled = 1;
2059	}
2060	return rc;
2061}
2062EXPORT_SYMBOL(pcim_enable_device);
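
/*
 * Example (an illustrative sketch): with the managed variant no
 * explicit disable is needed in error paths or in the remove callback,
 * because pcim_release() runs automatically on driver detach.  The
 * "foo" names are hypothetical.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int err = pcim_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		return foo_init_hw(pdev);
 *	}
 */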
2063
2064/**
2065 * pcim_pin_device - Pin managed PCI device
2066 * @pdev: PCI device to pin
2067 *
2068 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
2069 * driver detach.  @pdev must have been enabled with
2070 * pcim_enable_device().
2071 */
2072void pcim_pin_device(struct pci_dev *pdev)
2073{
2074	struct pci_devres *dr;
2075
2076	dr = find_pci_dr(pdev);
2077	WARN_ON(!dr || !dr->enabled);
2078	if (dr)
2079		dr->pinned = 1;
2080}
2081EXPORT_SYMBOL(pcim_pin_device);
2082
2083/**
2084 * pcibios_add_device - provide arch specific hooks when adding device dev
2085 * @dev: the PCI device being added
2086 *
2087 * Permits the platform to provide architecture specific functionality when
2088 * devices are added. This is the default implementation. Architecture
2089 * implementations can override this.
2090 */
2091int __weak pcibios_add_device(struct pci_dev *dev)
2092{
2093	return 0;
2094}
2095
2096/**
2097 * pcibios_release_device - provide arch specific hooks when releasing
2098 *			    device dev
2099 * @dev: the PCI device being released
2100 *
2101 * Permits the platform to provide architecture specific functionality when
2102 * devices are released. This is the default implementation. Architecture
2103 * implementations can override this.
2104 */
2105void __weak pcibios_release_device(struct pci_dev *dev) {}
2106
2107/**
2108 * pcibios_disable_device - disable arch specific PCI resources for device dev
2109 * @dev: the PCI device to disable
2110 *
2111 * Disables architecture specific PCI resources for the device. This
2112 * is the default implementation. Architecture implementations can
2113 * override this.
2114 */
2115void __weak pcibios_disable_device(struct pci_dev *dev) {}
2116
2117/**
2118 * pcibios_penalize_isa_irq - penalize an ISA IRQ
2119 * @irq: ISA IRQ to penalize
2120 * @active: IRQ active or not
2121 *
2122 * Permits the platform to provide architecture-specific functionality when
2123 * penalizing ISA IRQs. This is the default implementation. Architecture
2124 * implementations can override this.
2125 */
2126void __weak pcibios_penalize_isa_irq(int irq, int active) {}
2127
2128static void do_pci_disable_device(struct pci_dev *dev)
2129{
2130	u16 pci_command;
2131
2132	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
2133	if (pci_command & PCI_COMMAND_MASTER) {
2134		pci_command &= ~PCI_COMMAND_MASTER;
2135		pci_write_config_word(dev, PCI_COMMAND, pci_command);
2136	}
2137
2138	pcibios_disable_device(dev);
2139}
2140
2141/**
2142 * pci_disable_enabled_device - Disable device without updating enable_cnt
2143 * @dev: PCI device to disable
2144 *
2145 * NOTE: This function is a backend of PCI power management routines and is
2146 * not supposed to be called by drivers.
2147 */
2148void pci_disable_enabled_device(struct pci_dev *dev)
2149{
2150	if (pci_is_enabled(dev))
2151		do_pci_disable_device(dev);
2152}
2153
2154/**
2155 * pci_disable_device - Disable PCI device after use
2156 * @dev: PCI device to be disabled
2157 *
2158 * Signal to the system that the PCI device is not in use by the system
2159 * anymore.  This only involves disabling PCI bus-mastering, if active.
2160 *
2161 * Note we don't actually disable the device until all callers of
2162 * pci_enable_device() have called pci_disable_device().
2163 */
2164void pci_disable_device(struct pci_dev *dev)
2165{
2166	struct pci_devres *dr;
2167
2168	dr = find_pci_dr(dev);
2169	if (dr)
2170		dr->enabled = 0;
2171
2172	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
2173		      "disabling already-disabled device");
2174
2175	if (atomic_dec_return(&dev->enable_cnt) != 0)
2176		return;
2177
2178	do_pci_disable_device(dev);
2179
2180	dev->is_busmaster = 0;
2181}
2182EXPORT_SYMBOL(pci_disable_device);
2183
2184/**
2185 * pcibios_set_pcie_reset_state - set reset state for device dev
2186 * @dev: the PCIe device reset
2187 * @state: Reset state to enter into
2188 *
2189 * Set the PCIe reset state for the device. This is the default
2190 * implementation. Architecture implementations can override this.
2191 */
2192int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
2193					enum pcie_reset_state state)
2194{
2195	return -EINVAL;
2196}
2197
2198/**
2199 * pci_set_pcie_reset_state - set reset state for device dev
2200 * @dev: the PCIe device reset
2201 * @state: Reset state to enter into
2202 *
2203 * Sets the PCI reset state for the device.
2204 */
2205int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
2206{
2207	return pcibios_set_pcie_reset_state(dev, state);
2208}
2209EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
2210
2211void pcie_clear_device_status(struct pci_dev *dev)
2212{
2213	u16 sta;
2214
2215	pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &sta);
2216	pcie_capability_write_word(dev, PCI_EXP_DEVSTA, sta);
2217}
2218
2219/**
2220 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
2221 * @dev: PCIe root port or event collector.
2222 */
2223void pcie_clear_root_pme_status(struct pci_dev *dev)
2224{
2225	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
2226}
2227
2228/**
2229 * pci_check_pme_status - Check if given device has generated PME.
2230 * @dev: Device to check.
2231 *
2232 * Check the PME status of the device and if set, clear it and clear PME enable
2233 * (if set).  Return 'true' if PME status and PME enable were both set or
2234 * 'false' otherwise.
2235 */
2236bool pci_check_pme_status(struct pci_dev *dev)
2237{
2238	int pmcsr_pos;
2239	u16 pmcsr;
2240	bool ret = false;
2241
2242	if (!dev->pm_cap)
2243		return false;
2244
2245	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2246	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2247	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2248		return false;
2249
2250	/* Clear PME status. */
2251	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2252	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2253		/* Disable PME to avoid interrupt flood. */
2254		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2255		ret = true;
2256	}
2257
2258	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2259
2260	return ret;
2261}
2262
2263/**
2264 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2265 * @dev: Device to handle.
2266 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2267 *
2268 * Check if @dev has generated PME and queue a resume request for it in that
2269 * case.
2270 */
2271static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2272{
2273	if (pme_poll_reset && dev->pme_poll)
2274		dev->pme_poll = false;
2275
2276	if (pci_check_pme_status(dev)) {
2277		pci_wakeup_event(dev);
2278		pm_request_resume(&dev->dev);
2279	}
2280	return 0;
2281}
2282
2283/**
2284 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2285 * @bus: Top bus of the subtree to walk.
2286 */
2287void pci_pme_wakeup_bus(struct pci_bus *bus)
2288{
2289	if (bus)
2290		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2291}
2292
2293
2294/**
2295 * pci_pme_capable - check the capability of PCI device to generate PME#
2296 * @dev: PCI device to handle.
2297 * @state: PCI state from which device will issue PME#.
2298 */
2299bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2300{
2301	if (!dev->pm_cap)
2302		return false;
2303
2304	return !!(dev->pme_support & (1 << state));
2305}
2306EXPORT_SYMBOL(pci_pme_capable);
2307
2308static void pci_pme_list_scan(struct work_struct *work)
2309{
2310	struct pci_pme_device *pme_dev, *n;
2311
2312	mutex_lock(&pci_pme_list_mutex);
2313	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2314		if (pme_dev->dev->pme_poll) {
2315			struct pci_dev *bridge;
2316
2317			bridge = pme_dev->dev->bus->self;
2318			/*
2319			 * If the bridge is in a low-power state, the
2320			 * configuration space of subordinate devices
2321			 * may not be accessible.
2322			 */
2323			if (bridge && bridge->current_state != PCI_D0)
2324				continue;
2325			/*
2326			 * If the device is in D3cold it should not be
2327			 * polled either.
2328			 */
2329			if (pme_dev->dev->current_state == PCI_D3cold)
2330				continue;
2331
2332			pci_pme_wakeup(pme_dev->dev, NULL);
2333		} else {
2334			list_del(&pme_dev->list);
2335			kfree(pme_dev);
2336		}
2337	}
2338	if (!list_empty(&pci_pme_list))
2339		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2340				   msecs_to_jiffies(PME_TIMEOUT));
2341	mutex_unlock(&pci_pme_list_mutex);
2342}
2343
2344static void __pci_pme_active(struct pci_dev *dev, bool enable)
2345{
2346	u16 pmcsr;
2347
2348	if (!dev->pme_support)
2349		return;
2350
2351	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2352	/* Clear PME_Status by writing 1 to it and enable PME# */
2353	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2354	if (!enable)
2355		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2356
2357	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2358}
2359
2360/**
2361 * pci_pme_restore - Restore PME configuration after config space restore.
2362 * @dev: PCI device to update.
2363 */
2364void pci_pme_restore(struct pci_dev *dev)
2365{
2366	u16 pmcsr;
2367
2368	if (!dev->pme_support)
2369		return;
2370
2371	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2372	if (dev->wakeup_prepared) {
2373		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2374		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2375	} else {
2376		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2377		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2378	}
2379	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2380}
2381
2382/**
2383 * pci_pme_active - enable or disable PCI device's PME# function
2384 * @dev: PCI device to handle.
2385 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2386 *
2387 * The caller must verify that the device is capable of generating PME# before
2388 * calling this function with @enable equal to 'true'.
2389 */
2390void pci_pme_active(struct pci_dev *dev, bool enable)
2391{
2392	__pci_pme_active(dev, enable);
2393
2394	/*
2395	 * PCI (as opposed to PCIe) PME requires that the device have
2396	 * its PME# line hooked up correctly. Not all hardware vendors
2397	 * do this, so the PME never gets delivered and the device
2398	 * remains asleep. The easiest way around this is to
2399	 * periodically walk the list of suspended devices and check
2400	 * whether any have their PME flag set. The assumption is that
2401	 * we'll wake up often enough anyway that this won't be a huge
2402	 * hit, and the power savings from the devices will still be a
2403	 * win.
2404	 *
2405	 * Although PCIe uses an in-band PME message instead of the PME# line
2406	 * to report PME, PME does not work for some PCIe devices in
2407	 * reality.  For example, there are devices that set their PME
2408	 * status bits, but don't really bother to send a PME message;
2409	 * there are PCI Express Root Ports that don't bother to
2410	 * trigger interrupts when they receive PME messages from the
2411	 * devices below.  So PME poll is used for PCIe devices too.
2412	 */
2413
2414	if (dev->pme_poll) {
2415		struct pci_pme_device *pme_dev;
2416		if (enable) {
2417			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2418					  GFP_KERNEL);
2419			if (!pme_dev) {
2420				pci_warn(dev, "can't enable PME#\n");
2421				return;
2422			}
2423			pme_dev->dev = dev;
2424			mutex_lock(&pci_pme_list_mutex);
2425			list_add(&pme_dev->list, &pci_pme_list);
2426			if (list_is_singular(&pci_pme_list))
2427				queue_delayed_work(system_freezable_wq,
2428						   &pci_pme_work,
2429						   msecs_to_jiffies(PME_TIMEOUT));
2430			mutex_unlock(&pci_pme_list_mutex);
2431		} else {
2432			mutex_lock(&pci_pme_list_mutex);
2433			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2434				if (pme_dev->dev == dev) {
2435					list_del(&pme_dev->list);
2436					kfree(pme_dev);
2437					break;
2438				}
2439			}
2440			mutex_unlock(&pci_pme_list_mutex);
2441		}
2442	}
2443
2444	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2445}
2446EXPORT_SYMBOL(pci_pme_active);
2447
2448/**
2449 * __pci_enable_wake - enable PCI device as wakeup event source
2450 * @dev: PCI device affected
2451 * @state: PCI state from which device will issue wakeup events
2452 * @enable: True to enable event generation; false to disable
2453 *
2454 * This enables the device as a wakeup event source, or disables it.
2455 * When such events involve platform-specific hooks, those hooks are
2456 * called automatically by this routine.
2457 *
2458 * Devices with legacy power management (no standard PCI PM capabilities)
2459 * always require such platform hooks.
2460 *
2461 * RETURN VALUE:
2462 * 0 is returned on success
2463 * -EINVAL is returned if device is not supposed to wake up the system
2464 * Error code depending on the platform is returned if both the platform and
2465 * the native mechanism fail to enable the generation of wake-up events
2466 */
2467static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2468{
2469	int ret = 0;
2470
2471	/*
2472	 * Bridges that are not power-manageable directly only signal
2473	 * wakeup on behalf of subordinate devices which is set up
2474	 * elsewhere, so skip them. However, bridges that are
2475	 * power-manageable may signal wakeup for themselves (for example,
2476	 * on a hotplug event) and they need to be covered here.
2477	 */
2478	if (!pci_power_manageable(dev))
2479		return 0;
2480
2481	/* Don't do the same thing twice in a row for one device. */
2482	if (!!enable == !!dev->wakeup_prepared)
2483		return 0;
2484
2485	/*
2486	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2487	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2488	 * enable.  To disable wake-up we call the platform first, for symmetry.
2489	 */
2490
2491	if (enable) {
2492		int error;
2493
2494		/*
2495		 * Enable PME signaling if the device can signal PME from
2496		 * D3cold regardless of whether or not it can signal PME from
2497		 * the current target state, because that will allow it to
2498		 * signal PME when the hierarchy above it goes into D3cold and
2499		 * the device itself ends up in D3cold as a result of that.
2500		 */
2501		if (pci_pme_capable(dev, state) || pci_pme_capable(dev, PCI_D3cold))
2502			pci_pme_active(dev, true);
2503		else
2504			ret = 1;
2505		error = platform_pci_set_wakeup(dev, true);
2506		if (ret)
2507			ret = error;
2508		if (!ret)
2509			dev->wakeup_prepared = true;
2510	} else {
2511		platform_pci_set_wakeup(dev, false);
2512		pci_pme_active(dev, false);
2513		dev->wakeup_prepared = false;
2514	}
2515
2516	return ret;
2517}
2518
2519/**
2520 * pci_enable_wake - change wakeup settings for a PCI device
2521 * @pci_dev: Target device
2522 * @state: PCI state from which device will issue wakeup events
2523 * @enable: Whether or not to enable event generation
2524 *
2525 * If @enable is set, check device_may_wakeup() for the device before calling
2526 * __pci_enable_wake() for it.
2527 */
2528int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2529{
2530	if (enable && !device_may_wakeup(&pci_dev->dev))
2531		return -EINVAL;
2532
2533	return __pci_enable_wake(pci_dev, state, enable);
2534}
2535EXPORT_SYMBOL(pci_enable_wake);
2536
2537/**
2538 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2539 * @dev: PCI device to prepare
2540 * @enable: True to enable wake-up event generation; false to disable
2541 *
2542 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2543 * and this function allows them to set that up cleanly - pci_enable_wake()
2544 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2545 * ordering constraints.
2546 *
2547 * This function only returns error code if the device is not allowed to wake
2548 * up the system from sleep or it is not capable of generating PME# from both
2549 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2550 */
2551int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2552{
2553	return pci_pme_capable(dev, PCI_D3cold) ?
2554			pci_enable_wake(dev, PCI_D3cold, enable) :
2555			pci_enable_wake(dev, PCI_D3hot, enable);
2556}
2557EXPORT_SYMBOL(pci_wake_from_d3);
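
/*
 * Example (an illustrative sketch): a network driver arming
 * Wake-on-LAN in its suspend path might use pci_wake_from_d3(), which
 * picks D3cold when the device can signal PME# from there.  The "foo"
 * names are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct pci_dev *pdev = to_pci_dev(dev);
 *
 *		foo_stop_traffic(pdev);
 *		pci_wake_from_d3(pdev, foo_wol_enabled(pdev));
 *		return 0;
 *	}
 */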
2558
2559/**
2560 * pci_target_state - find an appropriate low power state for a given PCI dev
2561 * @dev: PCI device
2562 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2563 *
2564 * Use underlying platform code to find a supported low power state for @dev.
2565 * If the platform can't manage @dev, return the deepest state from which it
2566 * can generate wake events, based on any available PME info.
2567 */
2568static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2569{
2570	pci_power_t target_state = PCI_D3hot;
2571
2572	if (platform_pci_power_manageable(dev)) {
2573		/*
2574		 * Call the platform to find the target state for the device.
2575		 */
2576		pci_power_t state = platform_pci_choose_state(dev);
2577
2578		switch (state) {
2579		case PCI_POWER_ERROR:
2580		case PCI_UNKNOWN:
2581			break;
2582		case PCI_D1:
2583		case PCI_D2:
2584			if (pci_no_d1d2(dev))
2585				break;
2586			fallthrough;
2587		default:
2588			target_state = state;
2589		}
2590
2591		return target_state;
2592	}
2593
2594	if (!dev->pm_cap)
2595		target_state = PCI_D0;
2596
2597	/*
2598	 * If the device is in D3cold even though it's not power-manageable by
2599	 * the platform, it may have been powered down by non-standard means.
2600	 * Best to let it slumber.
2601	 */
2602	if (dev->current_state == PCI_D3cold)
2603		target_state = PCI_D3cold;
2604
2605	if (wakeup && dev->pme_support) {
2606		pci_power_t state = target_state;
2607
2608		/*
2609		 * Find the deepest state from which the device can generate
2610		 * PME#.
2611		 */
2612		while (state && !(dev->pme_support & (1 << state)))
2613			state--;
2614
2615		if (state)
2616			return state;
2617		else if (dev->pme_support & 1)
2618			return PCI_D0;
2619	}
2620
2621	return target_state;
2622}
2623
2624/**
2625 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2626 *			  into a sleep state
2627 * @dev: Device to handle.
2628 *
2629 * Choose the power state appropriate for the device depending on whether
2630 * it can wake up the system and/or is power manageable by the platform
2631 * (PCI_D3hot is the default) and put the device into that state.
2632 */
2633int pci_prepare_to_sleep(struct pci_dev *dev)
2634{
2635	bool wakeup = device_may_wakeup(&dev->dev);
2636	pci_power_t target_state = pci_target_state(dev, wakeup);
2637	int error;
2638
2639	if (target_state == PCI_POWER_ERROR)
2640		return -EIO;
2641
2642	/*
2643	 * There are systems (for example, Intel mobile chips since Coffee
2644	 * Lake) where the power drawn while suspended can be significantly
2645	 * reduced by disabling PTM on PCIe root ports as this allows the
2646	 * port to enter a lower-power PM state and the SoC to reach a
2647	 * lower-power idle state as a whole.
2648	 */
2649	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2650		pci_disable_ptm(dev);
2651
2652	pci_enable_wake(dev, target_state, wakeup);
2653
2654	error = pci_set_power_state(dev, target_state);
2655
2656	if (error) {
2657		pci_enable_wake(dev, target_state, false);
2658		pci_restore_ptm_state(dev);
2659	}
2660
2661	return error;
2662}
2663EXPORT_SYMBOL(pci_prepare_to_sleep);
2664
2665/**
2666 * pci_back_from_sleep - turn PCI device on during system-wide transition
2667 *			 into working state
2668 * @dev: Device to handle.
2669 *
2670 * Disable device's system wake-up capability and put it into D0.
2671 */
2672int pci_back_from_sleep(struct pci_dev *dev)
2673{
2674	pci_enable_wake(dev, PCI_D0, false);
2675	return pci_set_power_state(dev, PCI_D0);
2676}
2677EXPORT_SYMBOL(pci_back_from_sleep);
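
/*
 * Example (an illustrative sketch): the PCI core's default system
 * sleep paths pair these two helpers roughly as follows (error
 * handling omitted):
 *
 *	pci_save_state(pdev);
 *	pci_prepare_to_sleep(pdev);	(suspend side)
 *
 *	pci_back_from_sleep(pdev);	(resume side)
 *	pci_restore_state(pdev);
 */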
2678
2679/**
2680 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2681 * @dev: PCI device being suspended.
2682 *
2683 * Prepare @dev to generate wake-up events at run time and put it into a low
2684 * power state.
2685 */
2686int pci_finish_runtime_suspend(struct pci_dev *dev)
2687{
2688	pci_power_t target_state;
2689	int error;
2690
2691	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2692	if (target_state == PCI_POWER_ERROR)
2693		return -EIO;
2694
2695	dev->runtime_d3cold = target_state == PCI_D3cold;
2696
2697	/*
2698	 * There are systems (for example, Intel mobile chips since Coffee
2699	 * Lake) where the power drawn while suspended can be significantly
2700	 * reduced by disabling PTM on PCIe root ports as this allows the
2701	 * port to enter a lower-power PM state and the SoC to reach a
2702	 * lower-power idle state as a whole.
2703	 */
2704	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
2705		pci_disable_ptm(dev);
2706
2707	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2708
2709	error = pci_set_power_state(dev, target_state);
2710
2711	if (error) {
2712		pci_enable_wake(dev, target_state, false);
2713		pci_restore_ptm_state(dev);
2714		dev->runtime_d3cold = false;
2715	}
2716
2717	return error;
2718}
2719
2720/**
2721 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2722 * @dev: Device to check.
2723 *
2724 * Return true if the device itself is capable of generating wake-up events
2725 * (through the platform or using the native PCIe PME) or if the device supports
2726 * PME and one of its upstream bridges can generate wake-up events.
2727 */
2728bool pci_dev_run_wake(struct pci_dev *dev)
2729{
2730	struct pci_bus *bus = dev->bus;
2731
2732	if (!dev->pme_support)
2733		return false;
2734
2735	/* PME-capable in principle, but not from the target power state */
2736	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2737		return false;
2738
2739	if (device_can_wakeup(&dev->dev))
2740		return true;
2741
2742	while (bus->parent) {
2743		struct pci_dev *bridge = bus->self;
2744
2745		if (device_can_wakeup(&bridge->dev))
2746			return true;
2747
2748		bus = bus->parent;
2749	}
2750
2751	/* We have reached the root bus. */
2752	if (bus->bridge)
2753		return device_can_wakeup(bus->bridge);
2754
2755	return false;
2756}
2757EXPORT_SYMBOL_GPL(pci_dev_run_wake);
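
/*
 * Example (an illustrative sketch): some network drivers use this
 * predicate at the end of probe to allow runtime PM only when the
 * device can actually wake itself up at run time:
 *
 *	if (pci_dev_run_wake(pdev))
 *		pm_runtime_put_noidle(&pdev->dev);
 */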
2758
2759/**
2760 * pci_dev_need_resume - Check if it is necessary to resume the device.
2761 * @pci_dev: Device to check.
2762 *
2763 * Return 'true' if the device is not runtime-suspended, if it has to be
2764 * reconfigured due to a difference in wakeup settings between system and
2765 * runtime suspend, or if its current power state is not suitable for the
2766 * upcoming (system-wide) transition.
2767 */
2768bool pci_dev_need_resume(struct pci_dev *pci_dev)
2769{
2770	struct device *dev = &pci_dev->dev;
2771	pci_power_t target_state;
2772
2773	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2774		return true;
2775
2776	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2777
2778	/*
2779	 * If the earlier platform check has not triggered, D3cold is just power
2780	 * removal on top of D3hot, so no need to resume the device in that
2781	 * case.
2782	 */
2783	return target_state != pci_dev->current_state &&
2784		target_state != PCI_D3cold &&
2785		pci_dev->current_state != PCI_D3hot;
2786}
2787
2788/**
2789 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2790 * @pci_dev: Device to check.
2791 *
2792 * If the device is suspended and it is not configured for system wakeup,
2793 * disable PME for it to prevent it from waking up the system unnecessarily.
2794 *
2795 * Note that if the device's power state is D3cold and the platform check in
2796 * pci_dev_need_resume() has not triggered, the device's configuration need not
2797 * be changed.
2798 */
2799void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2800{
2801	struct device *dev = &pci_dev->dev;
2802
2803	spin_lock_irq(&dev->power.lock);
2804
2805	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2806	    pci_dev->current_state < PCI_D3cold)
2807		__pci_pme_active(pci_dev, false);
2808
2809	spin_unlock_irq(&dev->power.lock);
2810}
2811
2812/**
2813 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2814 * @pci_dev: Device to handle.
2815 *
2816 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2817 * it might have been disabled during the prepare phase of system suspend if
2818 * the device was not configured for system wakeup.
2819 */
2820void pci_dev_complete_resume(struct pci_dev *pci_dev)
2821{
2822	struct device *dev = &pci_dev->dev;
2823
2824	if (!pci_dev_run_wake(pci_dev))
2825		return;
2826
2827	spin_lock_irq(&dev->power.lock);
2828
2829	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2830		__pci_pme_active(pci_dev, true);
2831
2832	spin_unlock_irq(&dev->power.lock);
2833}
2834
2835void pci_config_pm_runtime_get(struct pci_dev *pdev)
2836{
2837	struct device *dev = &pdev->dev;
2838	struct device *parent = dev->parent;
2839
2840	if (parent)
2841		pm_runtime_get_sync(parent);
2842	pm_runtime_get_noresume(dev);
2843	/*
2844	 * pdev->current_state is set to PCI_D3cold during suspend,
2845	 * so wait until the suspend completes
2846	 */
2847	pm_runtime_barrier(dev);
2848	/*
2849	 * Only need to resume devices in D3cold, because config
2850	 * registers are still accessible for devices suspended but
2851	 * not in D3cold.
2852	 */
2853	if (pdev->current_state == PCI_D3cold)
2854		pm_runtime_resume(dev);
2855}
2856
2857void pci_config_pm_runtime_put(struct pci_dev *pdev)
2858{
2859	struct device *dev = &pdev->dev;
2860	struct device *parent = dev->parent;
2861
2862	pm_runtime_put(dev);
2863	if (parent)
2864		pm_runtime_put_sync(parent);
2865}
2866
2867static const struct dmi_system_id bridge_d3_blacklist[] = {
2868#ifdef CONFIG_X86
2869	{
2870		/*
2871		 * Gigabyte X299 root port is not marked as hotplug capable,
2872		 * which allows Linux to power manage it.  However, this
2873		 * confuses the BIOS SMI handler, so don't power manage root
2874		 * ports on that system.
2875		 */
2876		.ident = "X299 DESIGNARE EX-CF",
2877		.matches = {
2878			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2879			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2880		},
2881	},
2882#endif
2883	{ }
2884};
2885
2886/**
2887 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2888 * @bridge: Bridge to check
2889 *
2890 * This function checks if it is possible to move the bridge to D3.
2891 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2892 */
2893bool pci_bridge_d3_possible(struct pci_dev *bridge)
2894{
2895	if (!pci_is_pcie(bridge))
2896		return false;
2897
2898	switch (pci_pcie_type(bridge)) {
2899	case PCI_EXP_TYPE_ROOT_PORT:
2900	case PCI_EXP_TYPE_UPSTREAM:
2901	case PCI_EXP_TYPE_DOWNSTREAM:
2902		if (pci_bridge_d3_disable)
2903			return false;
2904
2905		/*
2906		 * Hotplug ports handled by firmware in System Management Mode
2907		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2908		 */
2909		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2910			return false;
2911
2912		if (pci_bridge_d3_force)
2913			return true;
2914
2915		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2916		if (bridge->is_thunderbolt)
2917			return true;
2918
2919		/* Platform might know better if the bridge supports D3 */
2920		if (platform_pci_bridge_d3(bridge))
2921			return true;
2922
2923		/*
2924		 * Hotplug ports handled natively by the OS were not validated
2925		 * by vendors for runtime D3 at least until 2018 because there
2926		 * was no OS support.
2927		 */
2928		if (bridge->is_hotplug_bridge)
2929			return false;
2930
2931		if (dmi_check_system(bridge_d3_blacklist))
2932			return false;
2933
2934		/*
2935		 * It should be safe to put PCIe ports from 2015 or newer
2936		 * to D3.
2937		 */
2938		if (dmi_get_bios_year() >= 2015)
2939			return true;
2940		break;
2941	}
2942
2943	return false;
2944}
2945
2946static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2947{
2948	bool *d3cold_ok = data;
2949
2950	if (/* The device needs to be allowed to go D3cold ... */
2951	    dev->no_d3cold || !dev->d3cold_allowed ||
2952
2953	    /* ... and if it is wakeup capable to do so from D3cold. */
2954	    (device_may_wakeup(&dev->dev) &&
2955	     !pci_pme_capable(dev, PCI_D3cold)) ||
2956
2957	    /* If it is a bridge it must be allowed to go to D3. */
2958	    !pci_power_manageable(dev))
2959
2960		*d3cold_ok = false;
2961
2962	return !*d3cold_ok;
2963}
2964
2965/**
2966 * pci_bridge_d3_update - Update bridge D3 capabilities
2967 * @dev: PCI device which is changed
2968 *
2969 * Update upstream bridge PM capabilities accordingly depending on if the
2970 * device PM configuration was changed or the device is being removed.  The
2971 * change is also propagated upstream.
2972 */
2973void pci_bridge_d3_update(struct pci_dev *dev)
2974{
2975	bool remove = !device_is_registered(&dev->dev);
2976	struct pci_dev *bridge;
2977	bool d3cold_ok = true;
2978
2979	bridge = pci_upstream_bridge(dev);
2980	if (!bridge || !pci_bridge_d3_possible(bridge))
2981		return;
2982
2983	/*
2984	 * If D3 is currently allowed for the bridge, removing one of its
2985	 * children won't change that.
2986	 */
2987	if (remove && bridge->bridge_d3)
2988		return;
2989
2990	/*
2991	 * If D3 is currently allowed for the bridge and a child is added or
2992	 * changed, disallowance of D3 can only be caused by that child, so
2993	 * we only need to check that single device, not any of its siblings.
2994	 *
2995	 * If D3 is currently not allowed for the bridge, checking the device
2996	 * first may allow us to skip checking its siblings.
2997	 */
2998	if (!remove)
2999		pci_dev_check_d3cold(dev, &d3cold_ok);
3000
3001	/*
3002	 * If D3 is currently not allowed for the bridge, this may be caused
3003	 * either by the device being changed/removed or any of its siblings,
3004	 * so we need to go through all children to find out if one of them
3005	 * continues to block D3.
3006	 */
3007	if (d3cold_ok && !bridge->bridge_d3)
3008		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
3009			     &d3cold_ok);
3010
3011	if (bridge->bridge_d3 != d3cold_ok) {
3012		bridge->bridge_d3 = d3cold_ok;
3013		/* Propagate change to upstream bridges */
3014		pci_bridge_d3_update(bridge);
3015	}
3016}
3017
3018/**
3019 * pci_d3cold_enable - Enable D3cold for device
3020 * @dev: PCI device to handle
3021 *
3022 * This function can be used in drivers to enable D3cold from the device
3023 * they handle.  It also updates upstream PCI bridge PM capabilities
3024 * accordingly.
3025 */
3026void pci_d3cold_enable(struct pci_dev *dev)
3027{
3028	if (dev->no_d3cold) {
3029		dev->no_d3cold = false;
3030		pci_bridge_d3_update(dev);
3031	}
3032}
3033EXPORT_SYMBOL_GPL(pci_d3cold_enable);
3034
3035/**
3036 * pci_d3cold_disable - Disable D3cold for device
3037 * @dev: PCI device to handle
3038 *
3039 * This function can be used in drivers to disable D3cold from the device
3040 * they handle.  It also updates upstream PCI bridge PM capabilities
3041 * accordingly.
3042 */
3043void pci_d3cold_disable(struct pci_dev *dev)
3044{
3045	if (!dev->no_d3cold) {
3046		dev->no_d3cold = true;
3047		pci_bridge_d3_update(dev);
3048	}
3049}
3050EXPORT_SYMBOL_GPL(pci_d3cold_disable);
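
/*
 * Example (an illustrative sketch): a driver whose device loses state
 * it cannot recreate across D3cold can opt out at probe time.  The
 * "foo" helper is hypothetical.
 *
 *	if (foo_has_volatile_fw_state(pdev))
 *		pci_d3cold_disable(pdev);
 */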
3051
3052/**
3053 * pci_pm_init - Initialize PM functions of given PCI device
3054 * @dev: PCI device to handle.
3055 */
3056void pci_pm_init(struct pci_dev *dev)
3057{
3058	int pm;
3059	u16 status;
3060	u16 pmc;
3061
3062	pm_runtime_forbid(&dev->dev);
3063	pm_runtime_set_active(&dev->dev);
3064	pm_runtime_enable(&dev->dev);
3065	device_enable_async_suspend(&dev->dev);
3066	dev->wakeup_prepared = false;
3067
3068	dev->pm_cap = 0;
3069	dev->pme_support = 0;
3070
3071	/* find PCI PM capability in list */
3072	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
3073	if (!pm)
3074		return;
3075	/* Check device's ability to generate PME# */
3076	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
3077
3078	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
3079		pci_err(dev, "unsupported PM cap regs version (%u)\n",
3080			pmc & PCI_PM_CAP_VER_MASK);
3081		return;
3082	}
3083
3084	dev->pm_cap = pm;
3085	dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
3086	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
3087	dev->bridge_d3 = pci_bridge_d3_possible(dev);
3088	dev->d3cold_allowed = true;
3089
3090	dev->d1_support = false;
3091	dev->d2_support = false;
3092	if (!pci_no_d1d2(dev)) {
3093		if (pmc & PCI_PM_CAP_D1)
3094			dev->d1_support = true;
3095		if (pmc & PCI_PM_CAP_D2)
3096			dev->d2_support = true;
3097
3098		if (dev->d1_support || dev->d2_support)
3099			pci_info(dev, "supports%s%s\n",
3100				   dev->d1_support ? " D1" : "",
3101				   dev->d2_support ? " D2" : "");
3102	}
3103
3104	pmc &= PCI_PM_CAP_PME_MASK;
3105	if (pmc) {
3106		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
3107			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
3108			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
3109			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
3110			 (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
3111			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
3112		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
3113		dev->pme_poll = true;
3114		/*
3115		 * Make device's PM flags reflect the wake-up capability, but
3116		 * let user space enable it to wake up the system as needed.
3117		 */
3118		device_set_wakeup_capable(&dev->dev, true);
3119		/* Disable the PME# generation functionality */
3120		pci_pme_active(dev, false);
3121	}
3122
3123	pci_read_config_word(dev, PCI_STATUS, &status);
3124	if (status & PCI_STATUS_IMM_READY)
3125		dev->imm_ready = 1;
3126}
3127
3128static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
3129{
3130	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
3131
3132	switch (prop) {
3133	case PCI_EA_P_MEM:
3134	case PCI_EA_P_VF_MEM:
3135		flags |= IORESOURCE_MEM;
3136		break;
3137	case PCI_EA_P_MEM_PREFETCH:
3138	case PCI_EA_P_VF_MEM_PREFETCH:
3139		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
3140		break;
3141	case PCI_EA_P_IO:
3142		flags |= IORESOURCE_IO;
3143		break;
3144	default:
3145		return 0;
3146	}
3147
3148	return flags;
3149}
3150
3151static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
3152					    u8 prop)
3153{
3154	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
3155		return &dev->resource[bei];
3156#ifdef CONFIG_PCI_IOV
3157	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
3158		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
3159		return &dev->resource[PCI_IOV_RESOURCES +
3160				      bei - PCI_EA_BEI_VF_BAR0];
3161#endif
3162	else if (bei == PCI_EA_BEI_ROM)
3163		return &dev->resource[PCI_ROM_RESOURCE];
3164	else
3165		return NULL;
3166}
3167
3168/* Read an Enhanced Allocation (EA) entry */
3169static int pci_ea_read(struct pci_dev *dev, int offset)
3170{
3171	struct resource *res;
3172	int ent_size, ent_offset = offset;
3173	resource_size_t start, end;
3174	unsigned long flags;
3175	u32 dw0, bei, base, max_offset;
3176	u8 prop;
3177	bool support_64 = (sizeof(resource_size_t) >= 8);
3178
3179	pci_read_config_dword(dev, ent_offset, &dw0);
3180	ent_offset += 4;
3181
3182	/* Entry size field indicates DWORDs after 1st */
3183	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
3184
3185	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
3186		goto out;
3187
3188	bei = (dw0 & PCI_EA_BEI) >> 4;
3189	prop = (dw0 & PCI_EA_PP) >> 8;
3190
3191	/*
3192	 * If the Property is in the reserved range, try the Secondary
3193	 * Property instead.
3194	 */
3195	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
3196		prop = (dw0 & PCI_EA_SP) >> 16;
3197	if (prop > PCI_EA_P_BRIDGE_IO)
3198		goto out;
3199
3200	res = pci_ea_get_resource(dev, bei, prop);
3201	if (!res) {
3202		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
3203		goto out;
3204	}
3205
3206	flags = pci_ea_flags(dev, prop);
3207	if (!flags) {
3208		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
3209		goto out;
3210	}
3211
3212	/* Read Base */
3213	pci_read_config_dword(dev, ent_offset, &base);
3214	start = (base & PCI_EA_FIELD_MASK);
3215	ent_offset += 4;
3216
3217	/* Read MaxOffset */
3218	pci_read_config_dword(dev, ent_offset, &max_offset);
3219	ent_offset += 4;
3220
3221	/* Read Base MSBs (if 64-bit entry) */
3222	if (base & PCI_EA_IS_64) {
3223		u32 base_upper;
3224
3225		pci_read_config_dword(dev, ent_offset, &base_upper);
3226		ent_offset += 4;
3227
3228		flags |= IORESOURCE_MEM_64;
3229
3230		/* entry starts above 32-bit boundary, can't use */
3231		if (!support_64 && base_upper)
3232			goto out;
3233
3234		if (support_64)
3235			start |= ((u64)base_upper << 32);
3236	}
3237
3238	end = start + (max_offset | 0x03);
3239
3240	/* Read MaxOffset MSBs (if 64-bit entry) */
3241	if (max_offset & PCI_EA_IS_64) {
3242		u32 max_offset_upper;
3243
3244		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
3245		ent_offset += 4;
3246
3247		flags |= IORESOURCE_MEM_64;
3248
3249		/* entry too big, can't use */
3250		if (!support_64 && max_offset_upper)
3251			goto out;
3252
3253		if (support_64)
3254			end += ((u64)max_offset_upper << 32);
3255	}
3256
3257	if (end < start) {
3258		pci_err(dev, "EA Entry crosses address boundary\n");
3259		goto out;
3260	}
3261
3262	if (ent_size != ent_offset - offset) {
3263		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
3264			ent_size, ent_offset - offset);
3265		goto out;
3266	}
3267
3268	res->name = pci_name(dev);
3269	res->start = start;
3270	res->end = end;
3271	res->flags = flags;
3272
3273	if (bei <= PCI_EA_BEI_BAR5)
3274		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3275			   bei, res, prop);
3276	else if (bei == PCI_EA_BEI_ROM)
3277		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3278			   res, prop);
3279	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3280		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3281			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
3282	else
3283		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3284			   bei, res, prop);
3285
3286out:
3287	return offset + ent_size;
3288}
3289
3290/* Enhanced Allocation Initialization */
3291void pci_ea_init(struct pci_dev *dev)
3292{
3293	int ea;
3294	u8 num_ent;
3295	int offset;
3296	int i;
3297
3298	/* find PCI EA capability in list */
3299	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3300	if (!ea)
3301		return;
3302
3303	/* determine the number of entries */
3304	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3305					&num_ent);
3306	num_ent &= PCI_EA_NUM_ENT_MASK;
3307
3308	offset = ea + PCI_EA_FIRST_ENT;
3309
3310	/* Skip DWORD 2 for type 1 functions */
3311	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3312		offset += 4;
3313
3314	/* parse each EA entry */
3315	for (i = 0; i < num_ent; ++i)
3316		offset = pci_ea_read(dev, offset);
3317}
3318
3319static void pci_add_saved_cap(struct pci_dev *pci_dev,
3320	struct pci_cap_saved_state *new_cap)
3321{
3322	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3323}
3324
3325/**
3326 * _pci_add_cap_save_buffer - allocate buffer for saving given
3327 *			      capability registers
3328 * @dev: the PCI device
3329 * @cap: the capability to allocate the buffer for
3330 * @extended: true if @cap is an Extended capability ID, false if Standard
3331 * @size: requested size of the buffer
3332 */
3333static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3334				    bool extended, unsigned int size)
3335{
3336	int pos;
3337	struct pci_cap_saved_state *save_state;
3338
3339	if (extended)
3340		pos = pci_find_ext_capability(dev, cap);
3341	else
3342		pos = pci_find_capability(dev, cap);
3343
3344	if (!pos)
3345		return 0;
3346
3347	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3348	if (!save_state)
3349		return -ENOMEM;
3350
3351	save_state->cap.cap_nr = cap;
3352	save_state->cap.cap_extended = extended;
3353	save_state->cap.size = size;
3354	pci_add_saved_cap(dev, save_state);
3355
3356	return 0;
3357}
3358
3359int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3360{
3361	return _pci_add_cap_save_buffer(dev, cap, false, size);
3362}
3363
3364int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3365{
3366	return _pci_add_cap_save_buffer(dev, cap, true, size);
3367}
3368
3369/**
3370 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3371 * @dev: the PCI device
3372 */
3373void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3374{
3375	int error;
3376
3377	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3378					PCI_EXP_SAVE_REGS * sizeof(u16));
3379	if (error)
3380		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3381
3382	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3383	if (error)
3384		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3385
3386	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3387					    2 * sizeof(u16));
3388	if (error)
3389		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3390
3391	pci_allocate_vc_save_buffers(dev);
3392}
3393
3394void pci_free_cap_save_buffers(struct pci_dev *dev)
3395{
3396	struct pci_cap_saved_state *tmp;
3397	struct hlist_node *n;
3398
3399	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3400		kfree(tmp);
3401}
3402
3403/**
3404 * pci_configure_ari - enable or disable ARI forwarding
3405 * @dev: the PCI device
3406 *
3407 * If @dev and its upstream bridge both support ARI, enable ARI in the
3408 * bridge.  Otherwise, disable ARI in the bridge.
3409 */
3410void pci_configure_ari(struct pci_dev *dev)
3411{
3412	u32 cap;
3413	struct pci_dev *bridge;
3414
3415	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3416		return;
3417
3418	bridge = dev->bus->self;
3419	if (!bridge)
3420		return;
3421
3422	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3423	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3424		return;
3425
3426	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3427		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3428					 PCI_EXP_DEVCTL2_ARI);
3429		bridge->ari_enabled = 1;
3430	} else {
3431		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3432					   PCI_EXP_DEVCTL2_ARI);
3433		bridge->ari_enabled = 0;
3434	}
3435}
3436
3437static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3438{
3439	int pos;
3440	u16 cap, ctrl;
3441
3442	pos = pdev->acs_cap;
3443	if (!pos)
3444		return false;
3445
3446	/*
3447	 * Except for egress control, capabilities are either required
3448	 * or only required if controllable.  Features missing from the
3449	 * capability field can therefore be assumed to be hard-wired enabled.
3450	 */
3451	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3452	acs_flags &= (cap | PCI_ACS_EC);
3453
3454	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3455	return (ctrl & acs_flags) == acs_flags;
3456}
3457
3458/**
3459 * pci_acs_enabled - test ACS against required flags for a given device
3460 * @pdev: device to test
3461 * @acs_flags: required PCI ACS flags
3462 *
3463 * Return true if the device supports the provided flags.  Automatically
3464 * filters out flags that are not implemented on multifunction devices.
3465 *
3466 * Note that this interface checks the effective ACS capabilities of the
3467 * device rather than the actual capabilities.  For instance, most single
3468 * function endpoints are not required to support ACS because they have no
3469 * opportunity for peer-to-peer access.  We therefore return 'true'
3470 * regardless of whether the device exposes an ACS capability.  This makes
3471 * it much easier for callers of this function to ignore the actual type
3472 * or topology of the device when testing ACS support.
3473 */
3474bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3475{
3476	int ret;
3477
3478	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3479	if (ret >= 0)
3480		return ret > 0;
3481
3482	/*
3483	 * Conventional PCI and PCI-X devices never support ACS, either
3484	 * effectively or actually.  The shared bus topology implies that
3485	 * any device on the bus can receive or snoop DMA.
3486	 */
3487	if (!pci_is_pcie(pdev))
3488		return false;
3489
3490	switch (pci_pcie_type(pdev)) {
3491	/*
3492	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3493	 * but since their primary interface is PCI/X, we conservatively
3494	 * handle them as we would a non-PCIe device.
3495	 */
3496	case PCI_EXP_TYPE_PCIE_BRIDGE:
3497	/*
3498	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3499	 * applicable... must never implement an ACS Extended Capability...".
3500	 * This seems arbitrary, but we take a conservative interpretation
3501	 * of this statement.
3502	 */
3503	case PCI_EXP_TYPE_PCI_BRIDGE:
3504	case PCI_EXP_TYPE_RC_EC:
3505		return false;
3506	/*
3507	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3508	 * implement ACS in order to indicate their peer-to-peer capabilities,
3509	 * regardless of whether they are single- or multi-function devices.
3510	 */
3511	case PCI_EXP_TYPE_DOWNSTREAM:
3512	case PCI_EXP_TYPE_ROOT_PORT:
3513		return pci_acs_flags_enabled(pdev, acs_flags);
3514	/*
3515	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3516	 * implemented by the remaining PCIe types to indicate peer-to-peer
3517	 * capabilities, but only when they are part of a multifunction
3518	 * device.  The footnote for section 6.12 indicates the specific
3519	 * PCIe types included here.
3520	 */
3521	case PCI_EXP_TYPE_ENDPOINT:
3522	case PCI_EXP_TYPE_UPSTREAM:
3523	case PCI_EXP_TYPE_LEG_END:
3524	case PCI_EXP_TYPE_RC_END:
3525		if (!pdev->multifunction)
3526			break;
3527
3528		return pci_acs_flags_enabled(pdev, acs_flags);
3529	}
3530
3531	/*
3532	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3533	 * to single function devices with the exception of downstream ports.
3534	 */
3535	return true;
3536}
3537
3538/**
3539 * pci_acs_path_enabled - test ACS flags from start to end in a hierarchy
3540 * @start: starting downstream device
3541 * @end: ending upstream device or NULL to search to the root bus
3542 * @acs_flags: required flags
3543 *
3544 * Walk up a device tree from start to end testing PCI ACS support.  If
3545 * any step along the way does not support the required flags, return false.
3546 */
3547bool pci_acs_path_enabled(struct pci_dev *start,
3548			  struct pci_dev *end, u16 acs_flags)
3549{
3550	struct pci_dev *pdev, *parent = start;
3551
3552	do {
3553		pdev = parent;
3554
3555		if (!pci_acs_enabled(pdev, acs_flags))
3556			return false;
3557
3558		if (pci_is_root_bus(pdev->bus))
3559			return (end == NULL);
3560
3561		parent = pdev->bus->self;
3562	} while (pdev != end);
3563
3564	return true;
3565}
3566
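/*
 * Usage sketch (illustrative, not taken from an in-tree caller): test
 * whether peer-to-peer TLPs from a device are forced up to the root
 * complex, the usual criterion for IOMMU-group isolation.
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CC | PCI_ACS_UF;
 *
 *	if (pci_acs_path_enabled(pdev, NULL, flags))
 *		pci_info(pdev, "isolated up to the root bus\n");
 */
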
3567/**
3568 * pci_acs_init - Initialize ACS if hardware supports it
3569 * @dev: the PCI device
3570 */
3571void pci_acs_init(struct pci_dev *dev)
3572{
3573	dev->acs_cap = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3574
3575	/*
3576	 * Attempt to enable ACS regardless of capability because some Root
3577	 * Ports (e.g. those quirked with *_intel_pch_acs_*) do not have
3578	 * the standard ACS capability but still support ACS via those
3579	 * quirks.
3580	 */
3581	pci_enable_acs(dev);
3582}
3583
3584/**
3585 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3586 * @pdev: PCI device
3587 * @bar: BAR to find
3588 *
3589 * Helper to find the position of the ctrl register for a BAR.
3590 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3591 * Returns -ENOENT if no ctrl register for the BAR could be found.
3592 */
3593static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3594{
3595	unsigned int pos, nbars, i;
3596	u32 ctrl;
3597
3598	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3599	if (!pos)
3600		return -ENOTSUPP;
3601
3602	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3603	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3604		    PCI_REBAR_CTRL_NBAR_SHIFT;
3605
3606	for (i = 0; i < nbars; i++, pos += 8) {
3607		int bar_idx;
3608
3609		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3610		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3611		if (bar_idx == bar)
3612			return pos;
3613	}
3614
3615	return -ENOENT;
3616}
3617
3618/**
3619 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3620 * @pdev: PCI device
3621 * @bar: BAR to query
3622 *
3623 * Get the possible sizes of a resizable BAR as a bitmask defined in the spec
3624 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3625 */
3626u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3627{
3628	int pos;
3629	u32 cap;
3630
3631	pos = pci_rebar_find_pos(pdev, bar);
3632	if (pos < 0)
3633		return 0;
3634
3635	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3636	cap &= PCI_REBAR_CAP_SIZES;
3637
3638	/* Sapphire RX 5600 XT Pulse has an invalid cap dword for BAR 0 */
3639	if (pdev->vendor == PCI_VENDOR_ID_ATI && pdev->device == 0x731f &&
3640	    bar == 0 && cap == 0x7000)
3641		cap = 0x3f000;
3642
3643	return cap >> 4;
3644}
3645EXPORT_SYMBOL(pci_rebar_get_possible_sizes);
3646
3647/**
3648 * pci_rebar_get_current_size - get the current size of a BAR
3649 * @pdev: PCI device
3650 * @bar: BAR to query
3651 *
3652 * Read the size of a BAR from the resizable BAR config.
3653 * Returns size if found or negative error code.
3654 */
3655int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3656{
3657	int pos;
3658	u32 ctrl;
3659
3660	pos = pci_rebar_find_pos(pdev, bar);
3661	if (pos < 0)
3662		return pos;
3663
3664	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3665	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3666}
3667
3668/**
3669 * pci_rebar_set_size - set a new size for a BAR
3670 * @pdev: PCI device
3671 * @bar: BAR to set size to
3672 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3673 *
3674 * Set the new size of a BAR as defined in the spec.
3675 * Returns zero if resizing was successful, error code otherwise.
3676 */
3677int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3678{
3679	int pos;
3680	u32 ctrl;
3681
3682	pos = pci_rebar_find_pos(pdev, bar);
3683	if (pos < 0)
3684		return pos;
3685
3686	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3687	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3688	ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3689	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3690	return 0;
3691}
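
/*
 * Usage sketch (illustrative): resize BAR 0 to the largest advertised
 * size.  __fls() picks the highest set bit of the size mask (bit 0 =
 * 1MB); releasing and reassigning the BAR around the resize is the
 * caller's job and is omitted here.
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *	if (sizes && !pci_rebar_set_size(pdev, 0, __fls(sizes)))
 *		pci_info(pdev, "BAR 0 resized\n");
 */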
3692
3693/**
3694 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3695 * @dev: the PCI device
3696 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3697 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3698 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3699 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3700 *
3701 * Return 0 if all upstream bridges support AtomicOp routing, egress
3702 * blocking is disabled on all upstream ports, and the root port supports
3703 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3704 * AtomicOp completion), or negative otherwise.
3705 */
3706int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3707{
3708	struct pci_bus *bus = dev->bus;
3709	struct pci_dev *bridge;
3710	u32 cap, ctl2;
3711
3712	if (!pci_is_pcie(dev))
3713		return -EINVAL;
3714
3715	/*
3716	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3717	 * AtomicOp requesters.  For now, we only support endpoints as
3718	 * requesters and root ports as completers.  No endpoints as
3719	 * completers, and no peer-to-peer.
3720	 */
3721
3722	switch (pci_pcie_type(dev)) {
3723	case PCI_EXP_TYPE_ENDPOINT:
3724	case PCI_EXP_TYPE_LEG_END:
3725	case PCI_EXP_TYPE_RC_END:
3726		break;
3727	default:
3728		return -EINVAL;
3729	}
3730
3731	while (bus->parent) {
3732		bridge = bus->self;
3733
3734		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3735
3736		switch (pci_pcie_type(bridge)) {
3737		/* Ensure switch ports support AtomicOp routing */
3738		case PCI_EXP_TYPE_UPSTREAM:
3739		case PCI_EXP_TYPE_DOWNSTREAM:
3740			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3741				return -EINVAL;
3742			break;
3743
3744		/* Ensure root port supports all the sizes we care about */
3745		case PCI_EXP_TYPE_ROOT_PORT:
3746			if ((cap & cap_mask) != cap_mask)
3747				return -EINVAL;
3748			break;
3749		}
3750
3751		/* Ensure upstream ports don't block AtomicOps on egress */
3752		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3753			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3754						   &ctl2);
3755			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3756				return -EINVAL;
3757		}
3758
3759		bus = bus->parent;
3760	}
3761
3762	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3763				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3764	return 0;
3765}
3766EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
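
/*
 * Usage sketch (illustrative, not taken from an in-tree driver): a
 * driver that wants 64-bit AtomicOp completion at the Root Port before
 * issuing PCIe atomics.
 *
 *	if (pci_enable_atomic_ops_to_root(pdev,
 *					  PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_warn(&pdev->dev, "PCIe AtomicOps not supported\n");
 */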
3767
3768/**
3769 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3770 * @dev: the PCI device
3771 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3772 *
3773 * Perform INTx swizzling for a device behind one level of bridge.  This is
3774 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3775 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3776 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3777 * the PCI Express Base Specification, Revision 2.1).
3778 */
3779u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3780{
3781	int slot;
3782
3783	if (pci_ari_enabled(dev->bus))
3784		slot = 0;
3785	else
3786		slot = PCI_SLOT(dev->devfn);
3787
3788	return (((pin - 1) + slot) % 4) + 1;
3789}
3790
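/*
 * Worked example of the formula above: a device in slot 2 behind a
 * bridge asserting INTB (pin 2) appears upstream as pin
 * ((2 - 1) + 2) % 4 + 1 = 4, i.e. INTD.
 */
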
3791int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3792{
3793	u8 pin;
3794
3795	pin = dev->pin;
3796	if (!pin)
3797		return -1;
3798
3799	while (!pci_is_root_bus(dev->bus)) {
3800		pin = pci_swizzle_interrupt_pin(dev, pin);
3801		dev = dev->bus->self;
3802	}
3803	*bridge = dev;
3804	return pin;
3805}
3806
3807/**
3808 * pci_common_swizzle - swizzle INTx all the way to root bridge
3809 * @dev: the PCI device
3810 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3811 *
3812 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3813 * bridges all the way up to a PCI root bus.
3814 */
3815u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3816{
3817	u8 pin = *pinp;
3818
3819	while (!pci_is_root_bus(dev->bus)) {
3820		pin = pci_swizzle_interrupt_pin(dev, pin);
3821		dev = dev->bus->self;
3822	}
3823	*pinp = pin;
3824	return PCI_SLOT(dev->devfn);
3825}
3826EXPORT_SYMBOL_GPL(pci_common_swizzle);
3827
3828/**
3829 * pci_release_region - Release a PCI bar
3830 * @pdev: PCI device whose resources were previously reserved by
3831 *	  pci_request_region()
3832 * @bar: BAR to release
3833 *
3834 * Releases the PCI I/O and memory resources previously reserved by a
3835 * successful call to pci_request_region().  Call this function only
3836 * after all use of the PCI regions has ceased.
3837 */
3838void pci_release_region(struct pci_dev *pdev, int bar)
3839{
3840	struct pci_devres *dr;
3841
3842	if (pci_resource_len(pdev, bar) == 0)
3843		return;
3844	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3845		release_region(pci_resource_start(pdev, bar),
3846				pci_resource_len(pdev, bar));
3847	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3848		release_mem_region(pci_resource_start(pdev, bar),
3849				pci_resource_len(pdev, bar));
3850
3851	dr = find_pci_dr(pdev);
3852	if (dr)
3853		dr->region_mask &= ~(1 << bar);
3854}
3855EXPORT_SYMBOL(pci_release_region);
3856
3857/**
3858 * __pci_request_region - Reserve PCI I/O and memory resource
3859 * @pdev: PCI device whose resources are to be reserved
3860 * @bar: BAR to be reserved
3861 * @res_name: Name to be associated with resource.
3862 * @exclusive: whether the region access is exclusive or not
3863 *
3864 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3865 * being reserved by owner @res_name.  Do not access any
3866 * address inside the PCI regions unless this call returns
3867 * successfully.
3868 *
3869 * If @exclusive is set, then the region is marked so that userspace
3870 * is explicitly not allowed to map the resource via /dev/mem or
3871 * sysfs MMIO access.
3872 *
3873 * Returns 0 on success, or %EBUSY on error.  A warning
3874 * message is also printed on failure.
3875 */
3876static int __pci_request_region(struct pci_dev *pdev, int bar,
3877				const char *res_name, int exclusive)
3878{
3879	struct pci_devres *dr;
3880
3881	if (pci_resource_len(pdev, bar) == 0)
3882		return 0;
3883
3884	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3885		if (!request_region(pci_resource_start(pdev, bar),
3886			    pci_resource_len(pdev, bar), res_name))
3887			goto err_out;
3888	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3889		if (!__request_mem_region(pci_resource_start(pdev, bar),
3890					pci_resource_len(pdev, bar), res_name,
3891					exclusive))
3892			goto err_out;
3893	}
3894
3895	dr = find_pci_dr(pdev);
3896	if (dr)
3897		dr->region_mask |= 1 << bar;
3898
3899	return 0;
3900
3901err_out:
3902	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3903		 &pdev->resource[bar]);
3904	return -EBUSY;
3905}
3906
3907/**
3908 * pci_request_region - Reserve PCI I/O and memory resource
3909 * @pdev: PCI device whose resources are to be reserved
3910 * @bar: BAR to be reserved
3911 * @res_name: Name to be associated with resource
3912 *
3913 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3914 * being reserved by owner @res_name.  Do not access any
3915 * address inside the PCI regions unless this call returns
3916 * successfully.
3917 *
3918 * Returns 0 on success, or %EBUSY on error.  A warning
3919 * message is also printed on failure.
3920 */
3921int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3922{
3923	return __pci_request_region(pdev, bar, res_name, 0);
3924}
3925EXPORT_SYMBOL(pci_request_region);
3926
3927/**
3928 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3929 * @pdev: PCI device whose resources were previously reserved
3930 * @bars: Bitmask of BARs to be released
3931 *
3932 * Release selected PCI I/O and memory resources previously reserved.
3933 * Call this function only after all use of the PCI regions has ceased.
3934 */
3935void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3936{
3937	int i;
3938
3939	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3940		if (bars & (1 << i))
3941			pci_release_region(pdev, i);
3942}
3943EXPORT_SYMBOL(pci_release_selected_regions);
3944
3945static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3946					  const char *res_name, int excl)
3947{
3948	int i;
3949
3950	for (i = 0; i < PCI_STD_NUM_BARS; i++)
3951		if (bars & (1 << i))
3952			if (__pci_request_region(pdev, i, res_name, excl))
3953				goto err_out;
3954	return 0;
3955
3956err_out:
3957	while (--i >= 0)
3958		if (bars & (1 << i))
3959			pci_release_region(pdev, i);
3960
3961	return -EBUSY;
3962}
3963
3965/**
3966 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3967 * @pdev: PCI device whose resources are to be reserved
3968 * @bars: Bitmask of BARs to be requested
3969 * @res_name: Name to be associated with resource
3970 */
3971int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3972				 const char *res_name)
3973{
3974	return __pci_request_selected_regions(pdev, bars, res_name, 0);
3975}
3976EXPORT_SYMBOL(pci_request_selected_regions);
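
/*
 * Usage sketch (illustrative; "foo" is a hypothetical driver name):
 * reserve only BARs 0 and 2, e.g. when another driver owns the
 * remaining BARs.
 *
 *	err = pci_request_selected_regions(pdev, BIT(0) | BIT(2), "foo");
 *	if (err)
 *		return err;
 */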
3977
3978int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3979					   const char *res_name)
3980{
3981	return __pci_request_selected_regions(pdev, bars, res_name,
3982			IORESOURCE_EXCLUSIVE);
3983}
3984EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3985
3986/**
3987 * pci_release_regions - Release reserved PCI I/O and memory resources
3988 * @pdev: PCI device whose resources were previously reserved by
3989 *	  pci_request_regions()
3990 *
3991 * Releases all PCI I/O and memory resources previously reserved by a
3992 * successful call to pci_request_regions().  Call this function only
3993 * after all use of the PCI regions has ceased.
3994 */
3996void pci_release_regions(struct pci_dev *pdev)
3997{
3998	pci_release_selected_regions(pdev, (1 << PCI_STD_NUM_BARS) - 1);
3999}
4000EXPORT_SYMBOL(pci_release_regions);
4001
4002/**
4003 * pci_request_regions - Reserve PCI I/O and memory resources
4004 * @pdev: PCI device whose resources are to be reserved
4005 * @res_name: Name to be associated with resource.
4006 *
4007 * Mark all PCI regions associated with PCI device @pdev as
4008 * being reserved by owner @res_name.  Do not access any
4009 * address inside the PCI regions unless this call returns
4010 * successfully.
4011 *
4012 * Returns 0 on success, or %EBUSY on error.  A warning
4013 * message is also printed on failure.
4014 */
4015int pci_request_regions(struct pci_dev *pdev, const char *res_name)
4016{
4017	return pci_request_selected_regions(pdev,
4018			((1 << PCI_STD_NUM_BARS) - 1), res_name);
4019}
4020EXPORT_SYMBOL(pci_request_regions);
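
/*
 * Usage sketch (illustrative; "foo" is a hypothetical driver name):
 * the usual probe-time pairing with pci_release_regions().
 *
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		return err;
 *	regs = pci_iomap(pdev, 0, 0);
 *	...
 *	pci_iounmap(pdev, regs);
 *	pci_release_regions(pdev);
 */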
4021
4022/**
4023 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
4024 * @pdev: PCI device whose resources are to be reserved
4025 * @res_name: Name to be associated with resource.
4026 *
4027 * Mark all PCI regions associated with PCI device @pdev as being reserved
4028 * by owner @res_name.  Do not access any address inside the PCI regions
4029 * unless this call returns successfully.
4030 *
4031 * pci_request_regions_exclusive() will mark the region so that /dev/mem
4032 * and the sysfs MMIO access will not be allowed.
4033 *
4034 * Returns 0 on success, or %EBUSY on error.  A warning message is also
4035 * printed on failure.
4036 */
4037int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
4038{
4039	return pci_request_selected_regions_exclusive(pdev,
4040				((1 << PCI_STD_NUM_BARS) - 1), res_name);
4041}
4042EXPORT_SYMBOL(pci_request_regions_exclusive);
4043
4044/*
4045 * Record the PCI IO range (expressed as CPU physical address + size).
4046 * Return a negative value if an error has occurred, zero otherwise
4047 */
4048int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
4049			resource_size_t	size)
4050{
4051	int ret = 0;
4052#ifdef PCI_IOBASE
4053	struct logic_pio_hwaddr *range;
4054
4055	if (!size || addr + size < addr)
4056		return -EINVAL;
4057
4058	range = kzalloc(sizeof(*range), GFP_ATOMIC);
4059	if (!range)
4060		return -ENOMEM;
4061
4062	range->fwnode = fwnode;
4063	range->size = size;
4064	range->hw_start = addr;
4065	range->flags = LOGIC_PIO_CPU_MMIO;
4066
4067	ret = logic_pio_register_range(range);
4068	if (ret)
4069		kfree(range);
4070
4071	/* Ignore duplicates due to deferred probing */
4072	if (ret == -EEXIST)
4073		ret = 0;
4074#endif
4075
4076	return ret;
4077}
4078
4079phys_addr_t pci_pio_to_address(unsigned long pio)
4080{
4081	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
4082
4083#ifdef PCI_IOBASE
4084	if (pio >= MMIO_UPPER_LIMIT)
4085		return address;
4086
4087	address = logic_pio_to_hwaddr(pio);
4088#endif
4089
4090	return address;
4091}
4092EXPORT_SYMBOL_GPL(pci_pio_to_address);
4093
4094unsigned long __weak pci_address_to_pio(phys_addr_t address)
4095{
4096#ifdef PCI_IOBASE
4097	return logic_pio_trans_cpuaddr(address);
4098#else
4099	if (address > IO_SPACE_LIMIT)
4100		return (unsigned long)-1;
4101
4102	return (unsigned long) address;
4103#endif
4104}
4105
4106/**
4107 * pci_remap_iospace - Remap the memory mapped I/O space
4108 * @res: Resource describing the I/O space
4109 * @phys_addr: physical address of range to be mapped
4110 *
4111 * Remap the memory mapped I/O space described by the @res and the CPU
4112 * physical address @phys_addr into virtual address space.  Only
4113 * architectures that have memory mapped IO functions defined (and the
4114 * PCI_IOBASE value defined) should call this function.
4115 */
4116int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
4117{
4118#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4119	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4120
4121	if (!(res->flags & IORESOURCE_IO))
4122		return -EINVAL;
4123
4124	if (res->end > IO_SPACE_LIMIT)
4125		return -EINVAL;
4126
4127	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
4128				  pgprot_device(PAGE_KERNEL));
4129#else
4130	/*
4131	 * This architecture does not have memory mapped I/O space,
4132	 * so this function should never be called
4133	 */
4134	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
4135	return -ENODEV;
4136#endif
4137}
4138EXPORT_SYMBOL(pci_remap_iospace);
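
/*
 * Usage sketch (illustrative): a host bridge driver mapping the I/O
 * window it parsed from firmware, where "res" describes the logical
 * PIO range.
 *
 *	err = pci_remap_iospace(res, pci_pio_to_address(res->start));
 *	if (err)
 *		dev_warn(dev, "error %d: failed to map I/O range %pR\n",
 *			 err, res);
 */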
4139
4140/**
4141 * pci_unmap_iospace - Unmap the memory mapped I/O space
4142 * @res: resource to be unmapped
4143 *
4144 * Unmap the CPU virtual address @res from virtual address space.  Only
4145 * architectures that have memory mapped IO functions defined (and the
4146 * PCI_IOBASE value defined) should call this function.
4147 */
4148void pci_unmap_iospace(struct resource *res)
4149{
4150#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
4151	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
4152
4153	vunmap_range(vaddr, vaddr + resource_size(res));
4154#endif
4155}
4156EXPORT_SYMBOL(pci_unmap_iospace);
4157
4158static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
4159{
4160	struct resource **res = ptr;
4161
4162	pci_unmap_iospace(*res);
4163}
4164
4165/**
4166 * devm_pci_remap_iospace - Managed pci_remap_iospace()
4167 * @dev: Generic device to remap IO address for
4168 * @res: Resource describing the I/O space
4169 * @phys_addr: physical address of range to be mapped
4170 *
4171 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
4172 * detach.
4173 */
4174int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
4175			   phys_addr_t phys_addr)
4176{
4177	const struct resource **ptr;
4178	int error;
4179
4180	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4181	if (!ptr)
4182		return -ENOMEM;
4183
4184	error = pci_remap_iospace(res, phys_addr);
4185	if (error) {
4186		devres_free(ptr);
4187	} else	{
4188		*ptr = res;
4189		devres_add(dev, ptr);
4190	}
4191
4192	return error;
4193}
4194EXPORT_SYMBOL(devm_pci_remap_iospace);
4195
4196/**
4197 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4198 * @dev: Generic device to remap IO address for
4199 * @offset: Resource address to map
4200 * @size: Size of map
4201 *
4202 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4203 * detach.
4204 */
4205void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4206				      resource_size_t offset,
4207				      resource_size_t size)
4208{
4209	void __iomem **ptr, *addr;
4210
4211	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4212	if (!ptr)
4213		return NULL;
4214
4215	addr = pci_remap_cfgspace(offset, size);
4216	if (addr) {
4217		*ptr = addr;
4218		devres_add(dev, ptr);
4219	} else
4220		devres_free(ptr);
4221
4222	return addr;
4223}
4224EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4225
4226/**
4227 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4228 * @dev: generic device to handle the resource for
4229 * @res: configuration space resource to be handled
4230 *
4231 * Checks that a resource is a valid memory region, requests the memory
4232 * region and ioremaps with pci_remap_cfgspace() API that ensures the
4233 * proper PCI configuration space memory attributes are guaranteed.
4234 *
4235 * All operations are managed and will be undone on driver detach.
4236 *
4237 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4238 * on failure. Usage example::
4239 *
4240 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4241 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4242 *	if (IS_ERR(base))
4243 *		return PTR_ERR(base);
4244 */
4245void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4246					  struct resource *res)
4247{
4248	resource_size_t size;
4249	const char *name;
4250	void __iomem *dest_ptr;
4251
4252	BUG_ON(!dev);
4253
4254	if (!res || resource_type(res) != IORESOURCE_MEM) {
4255		dev_err(dev, "invalid resource\n");
4256		return IOMEM_ERR_PTR(-EINVAL);
4257	}
4258
4259	size = resource_size(res);
4260
4261	if (res->name)
4262		name = devm_kasprintf(dev, GFP_KERNEL, "%s %s", dev_name(dev),
4263				      res->name);
4264	else
4265		name = devm_kstrdup(dev, dev_name(dev), GFP_KERNEL);
4266	if (!name)
4267		return IOMEM_ERR_PTR(-ENOMEM);
4268
4269	if (!devm_request_mem_region(dev, res->start, size, name)) {
4270		dev_err(dev, "can't request region for resource %pR\n", res);
4271		return IOMEM_ERR_PTR(-EBUSY);
4272	}
4273
4274	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4275	if (!dest_ptr) {
4276		dev_err(dev, "ioremap failed for resource %pR\n", res);
4277		devm_release_mem_region(dev, res->start, size);
4278		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4279	}
4280
4281	return dest_ptr;
4282}
4283EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4284
4285static void __pci_set_master(struct pci_dev *dev, bool enable)
4286{
4287	u16 old_cmd, cmd;
4288
4289	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4290	if (enable)
4291		cmd = old_cmd | PCI_COMMAND_MASTER;
4292	else
4293		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4294	if (cmd != old_cmd) {
4295		pci_dbg(dev, "%s bus mastering\n",
4296			enable ? "enabling" : "disabling");
4297		pci_write_config_word(dev, PCI_COMMAND, cmd);
4298	}
4299	dev->is_busmaster = enable;
4300}
4301
4302/**
4303 * pcibios_setup - process "pci=" kernel boot arguments
4304 * @str: string used to pass in "pci=" kernel boot arguments
4305 *
4306 * Process kernel boot arguments.  This is the default implementation.
4307 * Architecture specific implementations can override this as necessary.
4308 */
4309char * __weak __init pcibios_setup(char *str)
4310{
4311	return str;
4312}
4313
4314/**
4315 * pcibios_set_master - enable PCI bus-mastering for device dev
4316 * @dev: the PCI device to enable
4317 *
4318 * Enables PCI bus-mastering for the device.  This is the default
4319 * implementation.  Architecture specific implementations can override
4320 * this if necessary.
4321 */
4322void __weak pcibios_set_master(struct pci_dev *dev)
4323{
4324	u8 lat;
4325
4326	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4327	if (pci_is_pcie(dev))
4328		return;
4329
4330	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4331	if (lat < 16)
4332		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4333	else if (lat > pcibios_max_latency)
4334		lat = pcibios_max_latency;
4335	else
4336		return;
4337
4338	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4339}
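
/*
 * Worked example of the clamping above (values are illustrative): with
 * pcibios_max_latency = 255, a device reporting a latency timer of 8 is
 * raised to 64, while one reporting 248 already lies in [16, 255] and
 * nothing is written back.
 */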
4340
4341/**
4342 * pci_set_master - enables bus-mastering for device dev
4343 * @dev: the PCI device to enable
4344 *
4345 * Enables bus-mastering on the device and calls pcibios_set_master()
4346 * to do the needed arch specific settings.
4347 */
4348void pci_set_master(struct pci_dev *dev)
4349{
4350	__pci_set_master(dev, true);
4351	pcibios_set_master(dev);
4352}
4353EXPORT_SYMBOL(pci_set_master);
4354
4355/**
4356 * pci_clear_master - disables bus-mastering for device dev
4357 * @dev: the PCI device to disable
4358 */
4359void pci_clear_master(struct pci_dev *dev)
4360{
4361	__pci_set_master(dev, false);
4362}
4363EXPORT_SYMBOL(pci_clear_master);
4364
4365/**
4366 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4367 * @dev: the PCI device for which MWI is to be enabled
4368 *
4369 * Helper function for pci_set_mwi.
4370 * Originally copied from drivers/net/acenic.c.
4371 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4372 *
4373 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4374 */
4375int pci_set_cacheline_size(struct pci_dev *dev)
4376{
4377	u8 cacheline_size;
4378
4379	if (!pci_cache_line_size)
4380		return -EINVAL;
4381
4382	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4383	   equal to or a multiple of the right value. */
4384	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4385	if (cacheline_size >= pci_cache_line_size &&
4386	    (cacheline_size % pci_cache_line_size) == 0)
4387		return 0;
4388
4389	/* Write the correct value. */
4390	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4391	/* Read it back. */
4392	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4393	if (cacheline_size == pci_cache_line_size)
4394		return 0;
4395
4396	pci_dbg(dev, "cache line size of %d is not supported\n",
4397		   pci_cache_line_size << 2);
4398
4399	return -EINVAL;
4400}
4401EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
4402
4403/**
4404 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4405 * @dev: the PCI device for which MWI is enabled
4406 *
4407 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4408 *
4409 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4410 */
4411int pci_set_mwi(struct pci_dev *dev)
4412{
4413#ifdef PCI_DISABLE_MWI
4414	return 0;
4415#else
4416	int rc;
4417	u16 cmd;
4418
4419	rc = pci_set_cacheline_size(dev);
4420	if (rc)
4421		return rc;
4422
4423	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4424	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4425		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4426		cmd |= PCI_COMMAND_INVALIDATE;
4427		pci_write_config_word(dev, PCI_COMMAND, cmd);
4428	}
4429	return 0;
4430#endif
4431}
4432EXPORT_SYMBOL(pci_set_mwi);
4433
4434/**
4435 * pcim_set_mwi - a device-managed pci_set_mwi()
4436 * @dev: the PCI device for which MWI is enabled
4437 *
4438 * Managed pci_set_mwi().
4439 *
4440 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4441 */
4442int pcim_set_mwi(struct pci_dev *dev)
4443{
4444	struct pci_devres *dr;
4445
4446	dr = find_pci_dr(dev);
4447	if (!dr)
4448		return -ENOMEM;
4449
4450	dr->mwi = 1;
4451	return pci_set_mwi(dev);
4452}
4453EXPORT_SYMBOL(pcim_set_mwi);
4454
4455/**
4456 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4457 * @dev: the PCI device for which MWI is enabled
4458 *
4459 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4460 * Callers are not required to check the return value.
4461 *
4462 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4463 */
4464int pci_try_set_mwi(struct pci_dev *dev)
4465{
4466#ifdef PCI_DISABLE_MWI
4467	return 0;
4468#else
4469	return pci_set_mwi(dev);
4470#endif
4471}
4472EXPORT_SYMBOL(pci_try_set_mwi);
4473
4474/**
4475 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4476 * @dev: the PCI device to disable
4477 *
4478 * Disables PCI Memory-Write-Invalidate transaction on the device
4479 */
4480void pci_clear_mwi(struct pci_dev *dev)
4481{
4482#ifndef PCI_DISABLE_MWI
4483	u16 cmd;
4484
4485	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4486	if (cmd & PCI_COMMAND_INVALIDATE) {
4487		cmd &= ~PCI_COMMAND_INVALIDATE;
4488		pci_write_config_word(dev, PCI_COMMAND, cmd);
4489	}
4490#endif
4491}
4492EXPORT_SYMBOL(pci_clear_mwi);
4493
4494/**
4495 * pci_disable_parity - disable parity checking for device
4496 * @dev: the PCI device to operate on
4497 *
4498 * Disable parity checking for device @dev
4499 */
4500void pci_disable_parity(struct pci_dev *dev)
4501{
4502	u16 cmd;
4503
4504	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4505	if (cmd & PCI_COMMAND_PARITY) {
4506		cmd &= ~PCI_COMMAND_PARITY;
4507		pci_write_config_word(dev, PCI_COMMAND, cmd);
4508	}
4509}
4510
4511/**
4512 * pci_intx - enables/disables PCI INTx for device dev
4513 * @pdev: the PCI device to operate on
4514 * @enable: boolean: whether to enable or disable PCI INTx
4515 *
4516 * Enables/disables PCI INTx for device @pdev
4517 */
4518void pci_intx(struct pci_dev *pdev, int enable)
4519{
4520	u16 pci_command, new;
4521
4522	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4523
4524	if (enable)
4525		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4526	else
4527		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4528
4529	if (new != pci_command) {
4530		struct pci_devres *dr;
4531
4532		pci_write_config_word(pdev, PCI_COMMAND, new);
4533
4534		dr = find_pci_dr(pdev);
4535		if (dr && !dr->restore_intx) {
4536			dr->restore_intx = 1;
4537			dr->orig_intx = !enable;
4538		}
4539	}
4540}
4541EXPORT_SYMBOL_GPL(pci_intx);
4542
4543static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4544{
4545	struct pci_bus *bus = dev->bus;
4546	bool mask_updated = true;
4547	u32 cmd_status_dword;
4548	u16 origcmd, newcmd;
4549	unsigned long flags;
4550	bool irq_pending;
4551
4552	/*
4553	 * We do a single dword read to retrieve both command and status.
4554	 * Document assumptions that make this possible.
4555	 */
4556	BUILD_BUG_ON(PCI_COMMAND % 4);
4557	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4558
4559	raw_spin_lock_irqsave(&pci_lock, flags);
4560
4561	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4562
4563	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4564
4565	/*
4566	 * Check interrupt status register to see whether our device
4567	 * triggered the interrupt (when masking) or the next IRQ is
4568	 * already pending (when unmasking).
4569	 */
4570	if (mask != irq_pending) {
4571		mask_updated = false;
4572		goto done;
4573	}
4574
4575	origcmd = cmd_status_dword;
4576	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4577	if (mask)
4578		newcmd |= PCI_COMMAND_INTX_DISABLE;
4579	if (newcmd != origcmd)
4580		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4581
4582done:
4583	raw_spin_unlock_irqrestore(&pci_lock, flags);
4584
4585	return mask_updated;
4586}
4587
4588/**
4589 * pci_check_and_mask_intx - mask INTx on pending interrupt
4590 * @dev: the PCI device to operate on
4591 *
4592 * Check if the device dev has its INTx line asserted, mask it and return
4593 * true in that case. False is returned if no interrupt was pending.
4594 */
4595bool pci_check_and_mask_intx(struct pci_dev *dev)
4596{
4597	return pci_check_and_set_intx_mask(dev, true);
4598}
4599EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4600
4601/**
4602 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4603 * @dev: the PCI device to operate on
4604 *
4605 * Check if the device dev has its INTx line asserted, unmask it if not and
4606 * return true. False is returned and the mask remains active if there was
4607 * still an interrupt pending.
4608 */
4609bool pci_check_and_unmask_intx(struct pci_dev *dev)
4610{
4611	return pci_check_and_set_intx_mask(dev, false);
4612}
4613EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
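
/*
 * Usage sketch (illustrative; the "foo" names are hypothetical): a
 * handler for a shared INTx line that uses mask/unmask as its
 * acknowledge mechanism, returning IRQ_NONE when the line was not ours.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *		schedule_work(&foo_work);
 *		return IRQ_HANDLED;
 *	}
 *
 * The work item services the device and then calls
 * pci_check_and_unmask_intx() to re-enable the line.
 */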
4614
4615/**
4616 * pci_wait_for_pending_transaction - wait for pending transaction
4617 * @dev: the PCI device to operate on
4618 *
4619 * Return 0 if a transaction is still pending, 1 otherwise.
4620 */
4621int pci_wait_for_pending_transaction(struct pci_dev *dev)
4622{
4623	if (!pci_is_pcie(dev))
4624		return 1;
4625
4626	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4627				    PCI_EXP_DEVSTA_TRPND);
4628}
4629EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4630
4631/**
4632 * pcie_has_flr - check if a device supports function level resets
4633 * @dev: device to check
4634 *
4635 * Returns true if the device advertises support for PCIe function level
4636 * resets.
4637 */
4638bool pcie_has_flr(struct pci_dev *dev)
4639{
4640	u32 cap;
4641
4642	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4643		return false;
4644
4645	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4646	return cap & PCI_EXP_DEVCAP_FLR;
4647}
4648EXPORT_SYMBOL_GPL(pcie_has_flr);
4649
4650/**
4651 * pcie_flr - initiate a PCIe function level reset
4652 * @dev: device to reset
4653 *
4654 * Initiate a function level reset on @dev.  The caller should ensure the
4655 * device supports FLR before calling this function, e.g. by using the
4656 * pcie_has_flr() helper.
4657 */
4658int pcie_flr(struct pci_dev *dev)
4659{
4660	if (!pci_wait_for_pending_transaction(dev))
4661		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4662
4663	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4664
4665	if (dev->imm_ready)
4666		return 0;
4667
4668	/*
4669	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4670	 * 100ms, but may silently discard requests while the FLR is in
4671	 * progress.  Wait 100ms before trying to access the device.
4672	 */
4673	msleep(100);
4674
4675	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4676}
4677EXPORT_SYMBOL_GPL(pcie_flr);
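
/*
 * Usage sketch (illustrative): callers are expected to probe for FLR
 * support first, as the comment above requires.
 *
 *	if (pcie_has_flr(pdev))
 *		rc = pcie_flr(pdev);
 */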
4678
4679static int pci_af_flr(struct pci_dev *dev, int probe)
4680{
4681	int pos;
4682	u8 cap;
4683
4684	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4685	if (!pos)
4686		return -ENOTTY;
4687
4688	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4689		return -ENOTTY;
4690
4691	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4692	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4693		return -ENOTTY;
4694
4695	if (probe)
4696		return 0;
4697
4698	/*
4699	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4700	 * is used, so we use the control offset rather than status and shift
4701	 * the test bit to match.
4702	 */
4703	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4704				 PCI_AF_STATUS_TP << 8))
4705		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4706
4707	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4708
4709	if (dev->imm_ready)
4710		return 0;
4711
4712	/*
4713	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4714	 * updated 27 July 2006; a device must complete an FLR within
4715	 * 100ms, but may silently discard requests while the FLR is in
4716	 * progress.  Wait 100ms before trying to access the device.
4717	 */
4718	msleep(100);
4719
4720	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4721}
4722
4723/**
4724 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4725 * @dev: Device to reset.
4726 * @probe: If set, only check if the device can be reset this way.
4727 *
4728 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4729 * unset, it will be reinitialized internally when going from PCI_D3hot to
4730 * PCI_D0.  If that's the case and the device is not in a low-power state
4731 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4732 *
4733 * NOTE: This causes the caller to sleep for twice the device power transition
4734 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4735 * by default (i.e. unless the @dev's d3hot_delay field has a different value).
4736 * Moreover, only devices in D0 can be reset by this function.
4737 */
4738static int pci_pm_reset(struct pci_dev *dev, int probe)
4739{
4740	u16 csr;
4741
4742	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4743		return -ENOTTY;
4744
4745	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4746	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4747		return -ENOTTY;
4748
4749	if (probe)
4750		return 0;
4751
4752	if (dev->current_state != PCI_D0)
4753		return -EINVAL;
4754
4755	csr &= ~PCI_PM_CTRL_STATE_MASK;
4756	csr |= PCI_D3hot;
4757	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4758	pci_dev_d3_sleep(dev);
4759
4760	csr &= ~PCI_PM_CTRL_STATE_MASK;
4761	csr |= PCI_D0;
4762	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4763	pci_dev_d3_sleep(dev);
4764
4765	return pci_dev_wait(dev, "PM D3hot->D0", PCIE_RESET_READY_POLL_MS);
4766}
4767
4768/**
4769 * pcie_wait_for_link_delay - Wait until link is active or inactive
4770 * @pdev: Bridge device
4771 * @active: waiting for active or inactive?
4772 * @delay: Delay to wait after link has become active (in ms)
4773 *
4774 * Use this to wait until the link becomes active or inactive.
4775 */
4776static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
4777				     int delay)
4778{
4779	int timeout = 1000;
4780	bool ret;
4781	u16 lnk_status;
4782
4783	/*
4784	 * Some controllers might not implement link active reporting. In this
4785	 * case, we wait for 1000 ms + any delay requested by the caller.
4786	 */
4787	if (!pdev->link_active_reporting) {
4788		msleep(timeout + delay);
4789		return true;
4790	}
4791
4792	/*
4793	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4794	 * 20 ms, after which we should expect the link to become active if the
4795	 * reset was successful. If so, software must wait a minimum of 100 ms
4796	 * before sending configuration requests to devices downstream of this port.
4797	 *
4798	 * If the link fails to activate, either the device was physically
4799	 * removed or the link has permanently failed.
4800	 */
4801	if (active)
4802		msleep(20);
4803	for (;;) {
4804		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4805		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4806		if (ret == active)
4807			break;
4808		if (timeout <= 0)
4809			break;
4810		msleep(10);
4811		timeout -= 10;
4812	}
4813	if (active && ret)
4814		msleep(delay);
4815
4816	return ret == active;
4817}
4818
4819/**
4820 * pcie_wait_for_link - Wait until link is active or inactive
4821 * @pdev: Bridge device
4822 * @active: waiting for active or inactive?
4823 *
4824 * Use this to wait until the link becomes active or inactive.
4825 */
4826bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4827{
4828	return pcie_wait_for_link_delay(pdev, active, 100);
4829}
4830
4831/*
4832 * Find maximum D3cold delay required by all the devices on the bus.  The
4833 * spec says 100 ms, but firmware can lower it and we allow drivers to
4834 * increase it as well.
4835 *
4836 * Called with @pci_bus_sem locked for reading.
4837 */
4838static int pci_bus_max_d3cold_delay(const struct pci_bus *bus)
4839{
4840	const struct pci_dev *pdev;
4841	int min_delay = 100;
4842	int max_delay = 0;
4843
4844	list_for_each_entry(pdev, &bus->devices, bus_list) {
4845		if (pdev->d3cold_delay < min_delay)
4846			min_delay = pdev->d3cold_delay;
4847		if (pdev->d3cold_delay > max_delay)
4848			max_delay = pdev->d3cold_delay;
4849	}
4850
4851	return max(min_delay, max_delay);
4852}
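
/*
 * Worked example (illustrative): with child delays of {0, 120} ms,
 * min_delay becomes 0 and max_delay 120, so 120 is returned.  If every
 * child reports 0 (firmware lowered the delay), max(0, 0) = 0 and the
 * caller can skip the wait entirely.
 */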
4853
4854/**
4855 * pci_bridge_wait_for_secondary_bus - Wait for secondary bus to be accessible
4856 * @dev: PCI bridge
4857 *
4858 * Handle necessary delays before access to the devices on the secondary
4859 * side of the bridge are permitted after D3cold to D0 transition.
4860 *
4861 * For PCIe this means the delays in PCIe 5.0 section 6.6.1. For
4862 * conventional PCI it means Tpvrh + Trhfa specified in PCI 3.0 section
4863 * 4.3.2.
4864 */
4865void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
4866{
4867	struct pci_dev *child;
4868	int delay;
4869
4870	if (pci_dev_is_disconnected(dev))
4871		return;
4872
4873	if (!pci_is_bridge(dev) || !dev->bridge_d3)
4874		return;
4875
4876	down_read(&pci_bus_sem);
4877
4878	/*
4879	 * We only deal with devices that are present currently on the bus.
4880	 * For any hot-added devices the access delay is handled in pciehp
4881	 * board_added(). In case of ACPI hotplug the firmware is expected
4882	 * to configure the devices before OS is notified.
4883	 */
4884	if (!dev->subordinate || list_empty(&dev->subordinate->devices)) {
4885		up_read(&pci_bus_sem);
4886		return;
4887	}
4888
4889	/* Take d3cold_delay requirements into account */
4890	delay = pci_bus_max_d3cold_delay(dev->subordinate);
4891	if (!delay) {
4892		up_read(&pci_bus_sem);
4893		return;
4894	}
4895
4896	child = list_first_entry(&dev->subordinate->devices, struct pci_dev,
4897				 bus_list);
4898	up_read(&pci_bus_sem);
4899
4900	/*
4901	 * For conventional PCI and PCI-X, we need to wait Tpvrh + Trhfa before
4902	 * accessing the device after reset (that is 1000 ms + 100 ms). In
4903	 * practice this should not be needed because we don't do power
4904	 * management for them (see pci_bridge_d3_possible()).
4905	 */
4906	if (!pci_is_pcie(dev)) {
4907		pci_dbg(dev, "waiting %d ms for secondary bus\n", 1000 + delay);
4908		msleep(1000 + delay);
4909		return;
4910	}
4911
4912	/*
4913	 * For PCIe downstream and root ports that do not support speeds
4914	 * greater than 5 GT/s, we need to wait a minimum of 100 ms. For
4915	 * higher speeds (gen3) we must first wait for the data link layer
4916	 * to become active.
4917	 *
4918	 * However, 100 ms is the minimum and the PCIe spec says the
4919	 * software must allow at least 1s before it can determine that the
4920	 * device that did not respond is a broken device. There is
4921	 * evidence that 100 ms is not always enough; for example, a certain
4922	 * Titan Ridge xHCI controller does not always respond to
4923	 * configuration requests if we wait for only 100 ms (see
4924	 * https://bugzilla.kernel.org/show_bug.cgi?id=203885).
4925	 *
4926	 * Therefore we wait for 100 ms and check for the device presence.
4927	 * If it is still not present give it an additional 100 ms.
4928	 */
4929	if (!pcie_downstream_port(dev))
4930		return;
4931
4932	if (pcie_get_speed_cap(dev) <= PCIE_SPEED_5_0GT) {
4933		pci_dbg(dev, "waiting %d ms for downstream link\n", delay);
4934		msleep(delay);
4935	} else {
4936		pci_dbg(dev, "waiting %d ms for downstream link, after activation\n",
4937			delay);
4938		if (!pcie_wait_for_link_delay(dev, true, delay)) {
4939			/* Did not train, no need to wait any further */
4940			pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
4941			return;
4942		}
4943	}
4944
4945	if (!pci_device_is_present(child)) {
4946		pci_dbg(child, "waiting additional %d ms to become accessible\n", delay);
4947		msleep(delay);
4948	}
4949}
4950
4951void pci_reset_secondary_bus(struct pci_dev *dev)
4952{
4953	u16 ctrl;
4954
4955	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4956	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4957	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4958
4959	/*
4960	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4961	 * this to 2ms to ensure that we meet the minimum requirement.
4962	 */
4963	msleep(2);
4964
4965	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4966	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4967
4968	/*
4969	 * Trhfa for conventional PCI is 2^25 clock cycles.
4970	 * Assuming a minimum 33MHz clock this results in a 1s
4971	 * delay before we can consider subordinate devices to
4972	 * be re-initialized.  PCIe has some ways to shorten this,
4973	 * but we don't make use of them yet.
4974	 */
4975	ssleep(1);
4976}
4977
4978void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4979{
4980	pci_reset_secondary_bus(dev);
4981}
4982
4983/**
4984 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4985 * @dev: Bridge device
4986 *
4987 * Use the bridge control register to assert reset on the secondary bus.
4988 * Devices on the secondary bus are left in power-on state.
4989 */
4990int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4991{
4992	pcibios_reset_secondary_bus(dev);
4993
4994	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4995}
4996EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4997
4998static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4999{
5000	struct pci_dev *pdev;
5001
5002	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
5003	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5004		return -ENOTTY;
5005
5006	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
5007		if (pdev != dev)
5008			return -ENOTTY;
5009
5010	if (probe)
5011		return 0;
5012
5013	return pci_bridge_secondary_bus_reset(dev->bus->self);
5014}
5015
5016static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
5017{
5018	int rc = -ENOTTY;
5019
5020	if (!hotplug || !try_module_get(hotplug->owner))
5021		return rc;
5022
5023	if (hotplug->ops->reset_slot)
5024		rc = hotplug->ops->reset_slot(hotplug, probe);
5025
5026	module_put(hotplug->owner);
5027
5028	return rc;
5029}
5030
5031static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
5032{
5033	if (dev->multifunction || dev->subordinate || !dev->slot ||
5034	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
5035		return -ENOTTY;
5036
5037	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
5038}
5039
5040static int pci_reset_bus_function(struct pci_dev *dev, int probe)
5041{
5042	int rc;
5043
5044	rc = pci_dev_reset_slot_function(dev, probe);
5045	if (rc != -ENOTTY)
5046		return rc;
5047	return pci_parent_bus_reset(dev, probe);
5048}
5049
5050static void pci_dev_lock(struct pci_dev *dev)
5051{
5052	pci_cfg_access_lock(dev);
5053	/* block PM suspend, driver probe, etc. */
5054	device_lock(&dev->dev);
5055}
5056
5057/* Return 1 on successful lock, 0 on contention */
5058int pci_dev_trylock(struct pci_dev *dev)
5059{
5060	if (pci_cfg_access_trylock(dev)) {
5061		if (device_trylock(&dev->dev))
5062			return 1;
5063		pci_cfg_access_unlock(dev);
5064	}
5065
5066	return 0;
5067}
5068EXPORT_SYMBOL_GPL(pci_dev_trylock);
5069
5070void pci_dev_unlock(struct pci_dev *dev)
5071{
5072	device_unlock(&dev->dev);
5073	pci_cfg_access_unlock(dev);
5074}
5075EXPORT_SYMBOL_GPL(pci_dev_unlock);
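
/*
 * Usage sketch (illustrative): take the lock opportunistically, e.g.
 * from a context that must not block on a contended device.
 *
 *	if (pci_dev_trylock(pdev)) {
 *		... access config space safely ...
 *		pci_dev_unlock(pdev);
 *	}
 */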
5076
5077static void pci_dev_save_and_disable(struct pci_dev *dev)
5078{
5079	const struct pci_error_handlers *err_handler =
5080			dev->driver ? dev->driver->err_handler : NULL;
5081
5082	/*
5083	 * dev->driver->err_handler->reset_prepare() is protected against
5084	 * races with ->remove() by the device lock, which must be held by
5085	 * the caller.
5086	 */
5087	if (err_handler && err_handler->reset_prepare)
5088		err_handler->reset_prepare(dev);
5089
5090	/*
 5091	 * Wake up the device prior to saving state.  PM registers default
 5092	 * to D0 after reset and a simple register restore doesn't reliably
 5093	 * return to a non-D0 state anyway.
5094	 */
5095	pci_set_power_state(dev, PCI_D0);
5096
5097	pci_save_state(dev);
5098	/*
5099	 * Disable the device by clearing the Command register, except for
5100	 * INTx-disable which is set.  This not only disables MMIO and I/O port
5101	 * BARs, but also prevents the device from being Bus Master, preventing
5102	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
5103	 * compliant devices, INTx-disable prevents legacy interrupts.
5104	 */
5105	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
5106}
5107
5108static void pci_dev_restore(struct pci_dev *dev)
5109{
5110	const struct pci_error_handlers *err_handler =
5111			dev->driver ? dev->driver->err_handler : NULL;
5112
5113	pci_restore_state(dev);
5114
5115	/*
5116	 * dev->driver->err_handler->reset_done() is protected against
5117	 * races with ->remove() by the device lock, which must be held by
5118	 * the caller.
5119	 */
5120	if (err_handler && err_handler->reset_done)
5121		err_handler->reset_done(dev);
5122}
5123
5124/**
5125 * __pci_reset_function_locked - reset a PCI device function while holding
5126 * the @dev mutex lock.
5127 * @dev: PCI device to reset
5128 *
5129 * Some devices allow an individual function to be reset without affecting
5130 * other functions in the same device.  The PCI device must be responsive
5131 * to PCI config space in order to use this function.
5132 *
5133 * The device function is presumed to be unused and the caller is holding
5134 * the device mutex lock when this function is called.
5135 *
5136 * Resetting the device will make the contents of PCI configuration space
5137 * random, so any caller of this must be prepared to reinitialise the
5138 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
5139 * etc.
5140 *
5141 * Returns 0 if the device function was successfully reset or negative if the
5142 * device doesn't support resetting a single function.
5143 */
5144int __pci_reset_function_locked(struct pci_dev *dev)
5145{
5146	int rc;
5147
5148	might_sleep();
5149
5150	/*
5151	 * A reset method returns -ENOTTY if it doesn't support this device
5152	 * and we should try the next method.
5153	 *
5154	 * If it returns 0 (success), we're finished.  If it returns any
5155	 * other error, we're also finished: this indicates that further
5156	 * reset mechanisms might be broken on the device.
5157	 */
5158	rc = pci_dev_specific_reset(dev, 0);
5159	if (rc != -ENOTTY)
5160		return rc;
5161	if (pcie_has_flr(dev)) {
5162		rc = pcie_flr(dev);
5163		if (rc != -ENOTTY)
5164			return rc;
5165	}
5166	rc = pci_af_flr(dev, 0);
5167	if (rc != -ENOTTY)
5168		return rc;
5169	rc = pci_pm_reset(dev, 0);
5170	if (rc != -ENOTTY)
5171		return rc;
5172	return pci_reset_bus_function(dev, 0);
5173}
5174EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
5175
5176/**
5177 * pci_probe_reset_function - check whether the device can be safely reset
5178 * @dev: PCI device to reset
5179 *
5180 * Some devices allow an individual function to be reset without affecting
5181 * other functions in the same device.  The PCI device must be responsive
5182 * to PCI config space in order to use this function.
5183 *
5184 * Returns 0 if the device function can be reset or negative if the
5185 * device doesn't support resetting a single function.
5186 */
5187int pci_probe_reset_function(struct pci_dev *dev)
5188{
5189	int rc;
5190
5191	might_sleep();
5192
5193	rc = pci_dev_specific_reset(dev, 1);
5194	if (rc != -ENOTTY)
5195		return rc;
5196	if (pcie_has_flr(dev))
5197		return 0;
5198	rc = pci_af_flr(dev, 1);
5199	if (rc != -ENOTTY)
5200		return rc;
5201	rc = pci_pm_reset(dev, 1);
5202	if (rc != -ENOTTY)
5203		return rc;
5204
5205	return pci_reset_bus_function(dev, 1);
5206}
5207
5208/**
5209 * pci_reset_function - quiesce and reset a PCI device function
5210 * @dev: PCI device to reset
5211 *
5212 * Some devices allow an individual function to be reset without affecting
5213 * other functions in the same device.  The PCI device must be responsive
5214 * to PCI config space in order to use this function.
5215 *
5216 * This function does not just reset the PCI portion of a device, but
5217 * clears all the state associated with the device.  This function differs
5218 * from __pci_reset_function_locked() in that it saves and restores device state
5219 * over the reset and takes the PCI device lock.
5220 *
5221 * Returns 0 if the device function was successfully reset or negative if the
5222 * device doesn't support resetting a single function.
5223 */
5224int pci_reset_function(struct pci_dev *dev)
5225{
5226	int rc;
5227
5228	if (!dev->reset_fn)
5229		return -ENOTTY;
5230
5231	pci_dev_lock(dev);
5232	pci_dev_save_and_disable(dev);
5233
5234	rc = __pci_reset_function_locked(dev);
5235
5236	pci_dev_restore(dev);
5237	pci_dev_unlock(dev);
5238
5239	return rc;
5240}
5241EXPORT_SYMBOL_GPL(pci_reset_function);
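
/*
 * Illustrative sketch: how a driver might recover a wedged device from
 * process context using the helper above.  The function name and the
 * log message are hypothetical; pci_reset_function() itself saves,
 * resets, and restores the device state as documented above.
 */
static int __maybe_unused example_recover_function(struct pci_dev *pdev)
{
	int ret;

	ret = pci_reset_function(pdev);
	if (ret)
		pci_warn(pdev, "function reset failed: %d\n", ret);

	return ret;
}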
5242
5243/**
5244 * pci_reset_function_locked - quiesce and reset a PCI device function
5245 * @dev: PCI device to reset
5246 *
5247 * Some devices allow an individual function to be reset without affecting
5248 * other functions in the same device.  The PCI device must be responsive
5249 * to PCI config space in order to use this function.
5250 *
5251 * This function does not just reset the PCI portion of a device, but
5252 * clears all the state associated with the device.  This function differs
5253 * from __pci_reset_function_locked() in that it saves and restores device state
5254 * over the reset.  It also differs from pci_reset_function() in that it
5255 * requires the PCI device lock to be held.
5256 *
5257 * Returns 0 if the device function was successfully reset or negative if the
5258 * device doesn't support resetting a single function.
5259 */
5260int pci_reset_function_locked(struct pci_dev *dev)
5261{
5262	int rc;
5263
5264	if (!dev->reset_fn)
5265		return -ENOTTY;
5266
5267	pci_dev_save_and_disable(dev);
5268
5269	rc = __pci_reset_function_locked(dev);
5270
5271	pci_dev_restore(dev);
5272
5273	return rc;
5274}
5275EXPORT_SYMBOL_GPL(pci_reset_function_locked);
5276
5277/**
5278 * pci_try_reset_function - quiesce and reset a PCI device function
5279 * @dev: PCI device to reset
5280 *
5281 * Same as above, except return -EAGAIN if unable to lock device.
5282 */
5283int pci_try_reset_function(struct pci_dev *dev)
5284{
5285	int rc;
5286
5287	if (!dev->reset_fn)
5288		return -ENOTTY;
5289
5290	if (!pci_dev_trylock(dev))
5291		return -EAGAIN;
5292
5293	pci_dev_save_and_disable(dev);
5294	rc = __pci_reset_function_locked(dev);
5295	pci_dev_restore(dev);
5296	pci_dev_unlock(dev);
5297
5298	return rc;
5299}
5300EXPORT_SYMBOL_GPL(pci_try_reset_function);
5301
5302/* Do any devices on or below this bus prevent a bus reset? */
 5303static bool pci_bus_resettable(struct pci_bus *bus)
5304{
5305	struct pci_dev *dev;
 5306
5308	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5309		return false;
5310
5311	list_for_each_entry(dev, &bus->devices, bus_list) {
5312		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
 5313		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5314			return false;
5315	}
5316
5317	return true;
5318}
5319
5320/* Lock devices from the top of the tree down */
5321static void pci_bus_lock(struct pci_bus *bus)
5322{
5323	struct pci_dev *dev;
5324
5325	list_for_each_entry(dev, &bus->devices, bus_list) {
5326		pci_dev_lock(dev);
5327		if (dev->subordinate)
5328			pci_bus_lock(dev->subordinate);
5329	}
5330}
5331
5332/* Unlock devices from the bottom of the tree up */
5333static void pci_bus_unlock(struct pci_bus *bus)
5334{
5335	struct pci_dev *dev;
5336
5337	list_for_each_entry(dev, &bus->devices, bus_list) {
5338		if (dev->subordinate)
5339			pci_bus_unlock(dev->subordinate);
5340		pci_dev_unlock(dev);
5341	}
5342}
5343
5344/* Return 1 on successful lock, 0 on contention */
5345static int pci_bus_trylock(struct pci_bus *bus)
5346{
5347	struct pci_dev *dev;
5348
5349	list_for_each_entry(dev, &bus->devices, bus_list) {
5350		if (!pci_dev_trylock(dev))
5351			goto unlock;
5352		if (dev->subordinate) {
5353			if (!pci_bus_trylock(dev->subordinate)) {
5354				pci_dev_unlock(dev);
5355				goto unlock;
5356			}
5357		}
5358	}
5359	return 1;
5360
5361unlock:
5362	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5363		if (dev->subordinate)
5364			pci_bus_unlock(dev->subordinate);
5365		pci_dev_unlock(dev);
5366	}
5367	return 0;
5368}
5369
5370/* Do any devices on or below this slot prevent a bus reset? */
 5371static bool pci_slot_resettable(struct pci_slot *slot)
5372{
5373	struct pci_dev *dev;
5374
5375	if (slot->bus->self &&
5376	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5377		return false;
5378
5379	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5380		if (!dev->slot || dev->slot != slot)
5381			continue;
5382		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
 5383		    (dev->subordinate && !pci_bus_resettable(dev->subordinate)))
5384			return false;
5385	}
5386
5387	return true;
5388}
5389
5390/* Lock devices from the top of the tree down */
5391static void pci_slot_lock(struct pci_slot *slot)
5392{
5393	struct pci_dev *dev;
5394
5395	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5396		if (!dev->slot || dev->slot != slot)
5397			continue;
5398		pci_dev_lock(dev);
5399		if (dev->subordinate)
5400			pci_bus_lock(dev->subordinate);
5401	}
5402}
5403
5404/* Unlock devices from the bottom of the tree up */
5405static void pci_slot_unlock(struct pci_slot *slot)
5406{
5407	struct pci_dev *dev;
5408
5409	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5410		if (!dev->slot || dev->slot != slot)
5411			continue;
5412		if (dev->subordinate)
5413			pci_bus_unlock(dev->subordinate);
5414		pci_dev_unlock(dev);
5415	}
5416}
5417
5418/* Return 1 on successful lock, 0 on contention */
5419static int pci_slot_trylock(struct pci_slot *slot)
5420{
5421	struct pci_dev *dev;
5422
5423	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5424		if (!dev->slot || dev->slot != slot)
5425			continue;
5426		if (!pci_dev_trylock(dev))
5427			goto unlock;
5428		if (dev->subordinate) {
5429			if (!pci_bus_trylock(dev->subordinate)) {
5430				pci_dev_unlock(dev);
5431				goto unlock;
5432			}
5433		}
5434	}
5435	return 1;
5436
5437unlock:
5438	list_for_each_entry_continue_reverse(dev,
5439					     &slot->bus->devices, bus_list) {
5440		if (!dev->slot || dev->slot != slot)
5441			continue;
5442		if (dev->subordinate)
5443			pci_bus_unlock(dev->subordinate);
5444		pci_dev_unlock(dev);
5445	}
5446	return 0;
5447}
5448
5449/*
5450 * Save and disable devices from the top of the tree down while holding
5451 * the @dev mutex lock for the entire tree.
5452 */
5453static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5454{
5455	struct pci_dev *dev;
5456
5457	list_for_each_entry(dev, &bus->devices, bus_list) {
5458		pci_dev_save_and_disable(dev);
5459		if (dev->subordinate)
5460			pci_bus_save_and_disable_locked(dev->subordinate);
5461	}
5462}
5463
5464/*
 5465 * Restore devices from the top of the tree down while holding the @dev
 5466 * mutex lock for the entire tree.  Parent bridges need to be restored
 5467 * before we can get to subordinate devices.
5468 */
5469static void pci_bus_restore_locked(struct pci_bus *bus)
5470{
5471	struct pci_dev *dev;
5472
5473	list_for_each_entry(dev, &bus->devices, bus_list) {
5474		pci_dev_restore(dev);
5475		if (dev->subordinate)
5476			pci_bus_restore_locked(dev->subordinate);
5477	}
5478}
5479
5480/*
5481 * Save and disable devices from the top of the tree down while holding
5482 * the @dev mutex lock for the entire tree.
5483 */
5484static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5485{
5486	struct pci_dev *dev;
5487
5488	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5489		if (!dev->slot || dev->slot != slot)
5490			continue;
5491		pci_dev_save_and_disable(dev);
5492		if (dev->subordinate)
5493			pci_bus_save_and_disable_locked(dev->subordinate);
5494	}
5495}
5496
5497/*
 5498 * Restore devices from the top of the tree down while holding the @dev
 5499 * mutex lock for the entire tree.  Parent bridges need to be restored
 5500 * before we can get to subordinate devices.
5501 */
5502static void pci_slot_restore_locked(struct pci_slot *slot)
5503{
5504	struct pci_dev *dev;
5505
5506	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5507		if (!dev->slot || dev->slot != slot)
5508			continue;
5509		pci_dev_restore(dev);
5510		if (dev->subordinate)
5511			pci_bus_restore_locked(dev->subordinate);
5512	}
5513}
5514
5515static int pci_slot_reset(struct pci_slot *slot, int probe)
5516{
5517	int rc;
5518
 5519	if (!slot || !pci_slot_resettable(slot))
5520		return -ENOTTY;
5521
5522	if (!probe)
5523		pci_slot_lock(slot);
5524
5525	might_sleep();
5526
5527	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5528
5529	if (!probe)
5530		pci_slot_unlock(slot);
5531
5532	return rc;
5533}
5534
5535/**
5536 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5537 * @slot: PCI slot to probe
5538 *
5539 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5540 */
5541int pci_probe_reset_slot(struct pci_slot *slot)
5542{
5543	return pci_slot_reset(slot, 1);
5544}
5545EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5546
5547/**
5548 * __pci_reset_slot - Try to reset a PCI slot
5549 * @slot: PCI slot to reset
5550 *
5551 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5552 * independent of other slots.  For instance, some slots may support slot power
5553 * control.  In the case of a 1:1 bus to slot architecture, this function may
5554 * wrap the bus reset to avoid spurious slot related events such as hotplug.
5555 * Generally a slot reset should be attempted before a bus reset.  All of the
 5556 * functions of the slot and any subordinate buses behind the slot are reset
5557 * through this function.  PCI config space of all devices in the slot and
5558 * behind the slot is saved before and restored after reset.
5559 *
 5560 * Same as above, except this returns -EAGAIN if the slot cannot be locked.
5561 */
5562static int __pci_reset_slot(struct pci_slot *slot)
5563{
5564	int rc;
5565
5566	rc = pci_slot_reset(slot, 1);
5567	if (rc)
5568		return rc;
5569
5570	if (pci_slot_trylock(slot)) {
5571		pci_slot_save_and_disable_locked(slot);
5572		might_sleep();
5573		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5574		pci_slot_restore_locked(slot);
5575		pci_slot_unlock(slot);
5576	} else
5577		rc = -EAGAIN;
5578
5579	return rc;
5580}
5581
5582static int pci_bus_reset(struct pci_bus *bus, int probe)
5583{
5584	int ret;
5585
 5586	if (!bus->self || !pci_bus_resettable(bus))
5587		return -ENOTTY;
5588
5589	if (probe)
5590		return 0;
5591
5592	pci_bus_lock(bus);
5593
5594	might_sleep();
5595
5596	ret = pci_bridge_secondary_bus_reset(bus->self);
5597
5598	pci_bus_unlock(bus);
5599
5600	return ret;
5601}
5602
5603/**
5604 * pci_bus_error_reset - reset the bridge's subordinate bus
5605 * @bridge: The parent device that connects to the bus to reset
5606 *
5607 * This function will first try to reset the slots on this bus if the method is
5608 * available. If slot reset fails or is not available, this will fall back to a
5609 * secondary bus reset.
5610 */
5611int pci_bus_error_reset(struct pci_dev *bridge)
5612{
5613	struct pci_bus *bus = bridge->subordinate;
5614	struct pci_slot *slot;
5615
5616	if (!bus)
5617		return -ENOTTY;
5618
5619	mutex_lock(&pci_slot_mutex);
5620	if (list_empty(&bus->slots))
5621		goto bus_reset;
5622
5623	list_for_each_entry(slot, &bus->slots, list)
5624		if (pci_probe_reset_slot(slot))
5625			goto bus_reset;
5626
5627	list_for_each_entry(slot, &bus->slots, list)
5628		if (pci_slot_reset(slot, 0))
5629			goto bus_reset;
5630
5631	mutex_unlock(&pci_slot_mutex);
5632	return 0;
5633bus_reset:
5634	mutex_unlock(&pci_slot_mutex);
5635	return pci_bus_reset(bridge->subordinate, 0);
5636}
5637
5638/**
5639 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5640 * @bus: PCI bus to probe
5641 *
5642 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5643 */
5644int pci_probe_reset_bus(struct pci_bus *bus)
5645{
5646	return pci_bus_reset(bus, 1);
5647}
5648EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5649
5650/**
5651 * __pci_reset_bus - Try to reset a PCI bus
5652 * @bus: top level PCI bus to reset
5653 *
 5654 * Same as above, except this returns -EAGAIN if the bus cannot be locked.
5655 */
5656static int __pci_reset_bus(struct pci_bus *bus)
5657{
5658	int rc;
5659
5660	rc = pci_bus_reset(bus, 1);
5661	if (rc)
5662		return rc;
5663
5664	if (pci_bus_trylock(bus)) {
5665		pci_bus_save_and_disable_locked(bus);
5666		might_sleep();
5667		rc = pci_bridge_secondary_bus_reset(bus->self);
5668		pci_bus_restore_locked(bus);
5669		pci_bus_unlock(bus);
5670	} else
5671		rc = -EAGAIN;
5672
5673	return rc;
5674}
5675
5676/**
5677 * pci_reset_bus - Try to reset a PCI bus
5678 * @pdev: top level PCI device to reset via slot/bus
5679 *
 5680 * Same as above, except this returns -EAGAIN if the bus cannot be locked.
5681 */
5682int pci_reset_bus(struct pci_dev *pdev)
5683{
5684	return (!pci_probe_reset_slot(pdev->slot)) ?
5685	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5686}
5687EXPORT_SYMBOL_GPL(pci_reset_bus);
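
/*
 * Illustrative sketch: pci_reset_bus() above prefers a slot reset when
 * the device sits in a resettable slot and falls back to a secondary
 * bus reset otherwise.  A caller that owns every device on the bus
 * (hypothetical helper name) might use it like this:
 */
static int __maybe_unused example_reset_whole_bus(struct pci_dev *pdev)
{
	int ret = pci_reset_bus(pdev);

	if (ret == -EAGAIN)
		pci_warn(pdev, "bus or slot is locked, try again later\n");

	return ret;
}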
5688
5689/**
5690 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5691 * @dev: PCI device to query
5692 *
5693 * Returns mmrbc: maximum designed memory read count in bytes or
5694 * appropriate error value.
5695 */
5696int pcix_get_max_mmrbc(struct pci_dev *dev)
5697{
5698	int cap;
5699	u32 stat;
5700
5701	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5702	if (!cap)
5703		return -EINVAL;
5704
5705	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5706		return -EINVAL;
5707
5708	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5709}
5710EXPORT_SYMBOL(pcix_get_max_mmrbc);
5711
5712/**
5713 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5714 * @dev: PCI device to query
5715 *
5716 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5717 * value.
5718 */
5719int pcix_get_mmrbc(struct pci_dev *dev)
5720{
5721	int cap;
5722	u16 cmd;
5723
5724	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5725	if (!cap)
5726		return -EINVAL;
5727
5728	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5729		return -EINVAL;
5730
5731	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5732}
5733EXPORT_SYMBOL(pcix_get_mmrbc);
5734
5735/**
5736 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 5737 * @dev: PCI device to configure
 5738 * @mmrbc: maximum memory read count in bytes
 5739 *    valid values are 512, 1024, 2048, 4096
 5740 *
 5741 * If possible, sets the maximum memory read byte count; some bridges
 5742 * have errata that prevent this.
5743 */
5744int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5745{
5746	int cap;
5747	u32 stat, v, o;
5748	u16 cmd;
5749
5750	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5751		return -EINVAL;
5752
5753	v = ffs(mmrbc) - 10;
5754
5755	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5756	if (!cap)
5757		return -EINVAL;
5758
5759	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5760		return -EINVAL;
5761
5762	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5763		return -E2BIG;
5764
5765	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5766		return -EINVAL;
5767
5768	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5769	if (o != v) {
5770		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5771			return -EIO;
5772
5773		cmd &= ~PCI_X_CMD_MAX_READ;
5774		cmd |= v << 2;
5775		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5776			return -EIO;
5777	}
5778	return 0;
5779}
5780EXPORT_SYMBOL(pcix_set_mmrbc);
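
/*
 * Worked example of the MMRBC encoding used above, for reference: the
 * register field stores log2(mmrbc) - 9, so 512 -> 0, 1024 -> 1,
 * 2048 -> 2, 4096 -> 3.  For mmrbc = 2048, ffs(2048) = 12 gives
 * v = 12 - 10 = 2, and the decode in pcix_get_mmrbc() recovers
 * 512 << 2 = 2048 bytes.
 */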
5781
5782/**
5783 * pcie_get_readrq - get PCI Express read request size
5784 * @dev: PCI device to query
5785 *
 5786 * Returns the maximum memory read request size in bytes.
5787 */
5788int pcie_get_readrq(struct pci_dev *dev)
5789{
5790	u16 ctl;
5791
5792	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5793
5794	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5795}
5796EXPORT_SYMBOL(pcie_get_readrq);
5797
5798/**
5799 * pcie_set_readrq - set PCI Express maximum memory read request
 5800 * @dev: PCI device to configure
 5801 * @rq: maximum memory read count in bytes
 5802 *    valid values are 128, 256, 512, 1024, 2048, 4096
 5803 *
 5804 * If possible, sets the maximum memory read request size in bytes.
5805 */
5806int pcie_set_readrq(struct pci_dev *dev, int rq)
5807{
5808	u16 v;
5809	int ret;
5810
5811	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5812		return -EINVAL;
5813
5814	/*
 5815	 * If using the "performance" PCIe config, we clamp the read rq
 5816	 * size to the device's max payload size to keep the host bridge
 5817	 * from generating requests larger than we can cope with.
5818	 */
5819	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5820		int mps = pcie_get_mps(dev);
5821
5822		if (mps < rq)
5823			rq = mps;
5824	}
5825
5826	v = (ffs(rq) - 8) << 12;
5827
5828	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5829						  PCI_EXP_DEVCTL_READRQ, v);
5830
5831	return pcibios_err_to_errno(ret);
5832}
5833EXPORT_SYMBOL(pcie_set_readrq);
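
/*
 * Illustrative sketch: a driver can combine the accessors above to
 * clamp a device's Max_Read_Request_Size.  The 256-byte limit and the
 * function name are arbitrary examples, not a recommendation.
 */
static int __maybe_unused example_limit_readrq(struct pci_dev *pdev)
{
	int rq = pcie_get_readrq(pdev);

	if (rq > 256)
		return pcie_set_readrq(pdev, 256);

	return 0;
}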
5834
5835/**
5836 * pcie_get_mps - get PCI Express maximum payload size
5837 * @dev: PCI device to query
5838 *
5839 * Returns maximum payload size in bytes
5840 */
5841int pcie_get_mps(struct pci_dev *dev)
5842{
5843	u16 ctl;
5844
5845	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5846
5847	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5848}
5849EXPORT_SYMBOL(pcie_get_mps);
5850
5851/**
5852 * pcie_set_mps - set PCI Express maximum payload size
 5853 * @dev: PCI device to configure
 5854 * @mps: maximum payload size in bytes
 5855 *    valid values are 128, 256, 512, 1024, 2048, 4096
 5856 *
 5857 * If possible, sets the maximum payload size in bytes.
5858 */
5859int pcie_set_mps(struct pci_dev *dev, int mps)
5860{
5861	u16 v;
5862	int ret;
5863
5864	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5865		return -EINVAL;
5866
5867	v = ffs(mps) - 8;
5868	if (v > dev->pcie_mpss)
5869		return -EINVAL;
5870	v <<= 5;
5871
5872	ret = pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5873						  PCI_EXP_DEVCTL_PAYLOAD, v);
5874
5875	return pcibios_err_to_errno(ret);
5876}
5877EXPORT_SYMBOL(pcie_set_mps);
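
/*
 * Worked example of the MPS encoding used above: the register field
 * stores log2(mps) - 7, so 128 -> 0, 256 -> 1, ..., 4096 -> 5.  For
 * mps = 256, ffs(256) = 9 gives v = 1, the dev->pcie_mpss check
 * rejects sizes the device cannot support, and the decode in
 * pcie_get_mps() (128 << v) recovers the byte count.
 */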
5878
5879/**
5880 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5881 *			      device and its bandwidth limitation
5882 * @dev: PCI device to query
5883 * @limiting_dev: storage for device causing the bandwidth limitation
5884 * @speed: storage for speed of limiting device
5885 * @width: storage for width of limiting device
5886 *
5887 * Walk up the PCI device chain and find the point where the minimum
5888 * bandwidth is available.  Return the bandwidth available there and (if
5889 * limiting_dev, speed, and width pointers are supplied) information about
5890 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5891 * raw bandwidth.
5892 */
5893u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5894			     enum pci_bus_speed *speed,
5895			     enum pcie_link_width *width)
5896{
5897	u16 lnksta;
5898	enum pci_bus_speed next_speed;
5899	enum pcie_link_width next_width;
5900	u32 bw, next_bw;
5901
5902	if (speed)
5903		*speed = PCI_SPEED_UNKNOWN;
5904	if (width)
5905		*width = PCIE_LNK_WIDTH_UNKNOWN;
5906
5907	bw = 0;
5908
5909	while (dev) {
5910		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5911
5912		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5913		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5914			PCI_EXP_LNKSTA_NLW_SHIFT;
5915
5916		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5917
5918		/* Check if current device limits the total bandwidth */
5919		if (!bw || next_bw <= bw) {
5920			bw = next_bw;
5921
5922			if (limiting_dev)
5923				*limiting_dev = dev;
5924			if (speed)
5925				*speed = next_speed;
5926			if (width)
5927				*width = next_width;
5928		}
5929
5930		dev = pci_upstream_bridge(dev);
5931	}
5932
5933	return bw;
5934}
5935EXPORT_SYMBOL(pcie_bandwidth_available);
5936
5937/**
5938 * pcie_get_speed_cap - query for the PCI device's link speed capability
5939 * @dev: PCI device to query
5940 *
5941 * Query the PCI device speed capability.  Return the maximum link speed
5942 * supported by the device.
5943 */
5944enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5945{
5946	u32 lnkcap2, lnkcap;
5947
5948	/*
5949	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
5950	 * implementation note there recommends using the Supported Link
5951	 * Speeds Vector in Link Capabilities 2 when supported.
5952	 *
5953	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5954	 * should use the Supported Link Speeds field in Link Capabilities,
5955	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5956	 */
5957	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5958
5959	/* PCIe r3.0-compliant */
5960	if (lnkcap2)
5961		return PCIE_LNKCAP2_SLS2SPEED(lnkcap2);
5962
5963	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5964	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5965		return PCIE_SPEED_5_0GT;
5966	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5967		return PCIE_SPEED_2_5GT;
5968
5969	return PCI_SPEED_UNKNOWN;
5970}
5971EXPORT_SYMBOL(pcie_get_speed_cap);
5972
5973/**
5974 * pcie_get_width_cap - query for the PCI device's link width capability
5975 * @dev: PCI device to query
5976 *
5977 * Query the PCI device width capability.  Return the maximum link width
5978 * supported by the device.
5979 */
5980enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5981{
5982	u32 lnkcap;
5983
5984	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5985	if (lnkcap)
5986		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5987
5988	return PCIE_LNK_WIDTH_UNKNOWN;
5989}
5990EXPORT_SYMBOL(pcie_get_width_cap);
5991
5992/**
5993 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5994 * @dev: PCI device
5995 * @speed: storage for link speed
5996 * @width: storage for link width
5997 *
5998 * Calculate a PCI device's link bandwidth by querying for its link speed
5999 * and width, multiplying them, and applying encoding overhead.  The result
6000 * is in Mb/s, i.e., megabits/second of raw bandwidth.
6001 */
6002u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
6003			   enum pcie_link_width *width)
6004{
6005	*speed = pcie_get_speed_cap(dev);
6006	*width = pcie_get_width_cap(dev);
6007
6008	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
6009		return 0;
6010
6011	return *width * PCIE_SPEED2MBS_ENC(*speed);
6012}
6013
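/*
 * Worked example, assuming the PCIE_SPEED2MBS_ENC() encodings from
 * drivers/pci/pci.h: an x4 link at 8.0 GT/s encodes to 7877 Mb/s per
 * lane after 128b/130b overhead, so pcie_bandwidth_capable() reports
 * 4 * 7877 = 31508 Mb/s (~31.5 Gb/s) of raw bandwidth.
 */
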
6014/**
6015 * __pcie_print_link_status - Report the PCI device's link speed and width
6016 * @dev: PCI device to query
6017 * @verbose: Print info even when enough bandwidth is available
6018 *
6019 * If the available bandwidth at the device is less than the device is
6020 * capable of, report the device's maximum possible bandwidth and the
6021 * upstream link that limits its performance.  If @verbose, always print
6022 * the available bandwidth, even if the device isn't constrained.
6023 */
6024void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
6025{
6026	enum pcie_link_width width, width_cap;
6027	enum pci_bus_speed speed, speed_cap;
6028	struct pci_dev *limiting_dev = NULL;
6029	u32 bw_avail, bw_cap;
6030
6031	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
6032	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
6033
6034	if (bw_avail >= bw_cap && verbose)
6035		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
6036			 bw_cap / 1000, bw_cap % 1000,
6037			 pci_speed_string(speed_cap), width_cap);
6038	else if (bw_avail < bw_cap)
6039		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
6040			 bw_avail / 1000, bw_avail % 1000,
6041			 pci_speed_string(speed), width,
6042			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
6043			 bw_cap / 1000, bw_cap % 1000,
6044			 pci_speed_string(speed_cap), width_cap);
6045}
6046
6047/**
6048 * pcie_print_link_status - Report the PCI device's link speed and width
6049 * @dev: PCI device to query
6050 *
6051 * Report the available bandwidth at the device.
6052 */
6053void pcie_print_link_status(struct pci_dev *dev)
6054{
6055	__pcie_print_link_status(dev, true);
6056}
6057EXPORT_SYMBOL(pcie_print_link_status);
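
/*
 * Illustrative sketch: a bandwidth-sensitive driver might report its
 * link from probe.  The probe function shown here is hypothetical.
 */
static int __maybe_unused example_report_link(struct pci_dev *pdev)
{
	/* Logs available bandwidth and any limiting upstream link. */
	pcie_print_link_status(pdev);

	return 0;
}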
6058
6059/**
6060 * pci_select_bars - Make BAR mask from the type of resource
6061 * @dev: the PCI device for which BAR mask is made
6062 * @flags: resource type mask to be selected
6063 *
 6064 * This helper routine makes a BAR mask from the type of resource.
6065 */
6066int pci_select_bars(struct pci_dev *dev, unsigned long flags)
6067{
6068	int i, bars = 0;
6069	for (i = 0; i < PCI_NUM_RESOURCES; i++)
6070		if (pci_resource_flags(dev, i) & flags)
6071			bars |= (1 << i);
6072	return bars;
6073}
6074EXPORT_SYMBOL(pci_select_bars);
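
/*
 * Illustrative sketch: pci_select_bars() pairs naturally with
 * pci_request_selected_regions() to claim only the memory BARs of a
 * device.  The driver name string is a placeholder.
 */
static int __maybe_unused example_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-driver");
}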
6075
6076/* Some architectures require additional programming to enable VGA */
6077static arch_set_vga_state_t arch_set_vga_state;
6078
6079void __init pci_register_set_vga_state(arch_set_vga_state_t func)
6080{
6081	arch_set_vga_state = func;	/* NULL disables */
6082}
6083
6084static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
6085				  unsigned int command_bits, u32 flags)
6086{
6087	if (arch_set_vga_state)
6088		return arch_set_vga_state(dev, decode, command_bits,
6089						flags);
6090	return 0;
6091}
6092
6093/**
6094 * pci_set_vga_state - set VGA decode state on device and parents if requested
6095 * @dev: the PCI device
6096 * @decode: true = enable decoding, false = disable decoding
6097 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
 6098 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
 6099 *	the latter also traverses ancestors and updates bridge VGA decoding
6100 */
6101int pci_set_vga_state(struct pci_dev *dev, bool decode,
6102		      unsigned int command_bits, u32 flags)
6103{
6104	struct pci_bus *bus;
6105	struct pci_dev *bridge;
6106	u16 cmd;
6107	int rc;
6108
 6109	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));
6110
6111	/* ARCH specific VGA enables */
6112	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
6113	if (rc)
6114		return rc;
6115
6116	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
6117		pci_read_config_word(dev, PCI_COMMAND, &cmd);
6118		if (decode)
6119			cmd |= command_bits;
6120		else
6121			cmd &= ~command_bits;
6122		pci_write_config_word(dev, PCI_COMMAND, cmd);
6123	}
6124
6125	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
6126		return 0;
6127
6128	bus = dev->bus;
6129	while (bus) {
6130		bridge = bus->self;
6131		if (bridge) {
6132			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
6133					     &cmd);
6134			if (decode)
6135				cmd |= PCI_BRIDGE_CTL_VGA;
6136			else
6137				cmd &= ~PCI_BRIDGE_CTL_VGA;
6138			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
6139					      cmd);
6140		}
6141		bus = bus->parent;
6142	}
6143	return 0;
6144}
6145
6146#ifdef CONFIG_ACPI
6147bool pci_pr3_present(struct pci_dev *pdev)
6148{
6149	struct acpi_device *adev;
6150
6151	if (acpi_disabled)
6152		return false;
6153
6154	adev = ACPI_COMPANION(&pdev->dev);
6155	if (!adev)
6156		return false;
6157
6158	return adev->power.flags.power_resources &&
6159		acpi_has_method(adev->handle, "_PR3");
6160}
6161EXPORT_SYMBOL_GPL(pci_pr3_present);
6162#endif
6163
6164/**
6165 * pci_add_dma_alias - Add a DMA devfn alias for a device
6166 * @dev: the PCI device for which alias is added
6167 * @devfn_from: alias slot and function
6168 * @nr_devfns: number of subsequent devfns to alias
6169 *
6170 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
6171 * which is used to program permissible bus-devfn source addresses for DMA
6172 * requests in an IOMMU.  These aliases factor into IOMMU group creation
6173 * and are useful for devices generating DMA requests beyond or different
6174 * from their logical bus-devfn.  Examples include device quirks where the
6175 * device simply uses the wrong devfn, as well as non-transparent bridges
6176 * where the alias may be a proxy for devices in another domain.
6177 *
6178 * IOMMU group creation is performed during device discovery or addition,
6179 * prior to any potential DMA mapping and therefore prior to driver probing
6180 * (especially for userspace assigned devices where IOMMU group definition
6181 * cannot be left as a userspace activity).  DMA aliases should therefore
6182 * be configured via quirks, such as the PCI fixup header quirk.
6183 */
 6184void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned int nr_devfns)
6185{
6186	int devfn_to;
6187
 6188	nr_devfns = min(nr_devfns, (unsigned int) MAX_NR_DEVFNS - devfn_from);
6189	devfn_to = devfn_from + nr_devfns - 1;
6190
6191	if (!dev->dma_alias_mask)
6192		dev->dma_alias_mask = bitmap_zalloc(MAX_NR_DEVFNS, GFP_KERNEL);
6193	if (!dev->dma_alias_mask) {
6194		pci_warn(dev, "Unable to allocate DMA alias mask\n");
6195		return;
6196	}
6197
6198	bitmap_set(dev->dma_alias_mask, devfn_from, nr_devfns);
6199
6200	if (nr_devfns == 1)
6201		pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
6202				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from));
6203	else if (nr_devfns > 1)
6204		pci_info(dev, "Enabling fixed DMA alias for devfn range from %02x.%d to %02x.%d\n",
6205				PCI_SLOT(devfn_from), PCI_FUNC(devfn_from),
6206				PCI_SLOT(devfn_to), PCI_FUNC(devfn_to));
6207}
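
/*
 * Illustrative sketch: as noted above, DMA aliases are normally set up
 * from header fixup quirks.  The alias (function 1 of the same slot)
 * and the commented-out vendor/device IDs are hypothetical.
 */
static void __maybe_unused example_dma_alias_quirk(struct pci_dev *dev)
{
	pci_add_dma_alias(dev, PCI_DEVFN(PCI_SLOT(dev->devfn), 1), 1);
}
/* DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_dma_alias_quirk); */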
6208
6209bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
6210{
6211	return (dev1->dma_alias_mask &&
6212		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
6213	       (dev2->dma_alias_mask &&
6214		test_bit(dev1->devfn, dev2->dma_alias_mask)) ||
6215	       pci_real_dma_dev(dev1) == dev2 ||
6216	       pci_real_dma_dev(dev2) == dev1;
6217}
6218
6219bool pci_device_is_present(struct pci_dev *pdev)
6220{
6221	u32 v;
6222
6223	if (pci_dev_is_disconnected(pdev))
6224		return false;
6225	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
6226}
6227EXPORT_SYMBOL_GPL(pci_device_is_present);
6228
6229void pci_ignore_hotplug(struct pci_dev *dev)
6230{
6231	struct pci_dev *bridge = dev->bus->self;
6232
6233	dev->ignore_hotplug = 1;
6234	/* Propagate the "ignore hotplug" setting to the parent bridge. */
6235	if (bridge)
6236		bridge->ignore_hotplug = 1;
6237}
6238EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
6239
6240/**
6241 * pci_real_dma_dev - Get PCI DMA device for PCI device
6242 * @dev: the PCI device that may have a PCI DMA alias
6243 *
6244 * Permits the platform to provide architecture-specific functionality to
6245 * devices needing to alias DMA to another PCI device on another PCI bus. If
6246 * the PCI device is on the same bus, it is recommended to use
6247 * pci_add_dma_alias(). This is the default implementation. Architecture
6248 * implementations can override this.
6249 */
6250struct pci_dev __weak *pci_real_dma_dev(struct pci_dev *dev)
6251{
6252	return dev;
6253}
6254
6255resource_size_t __weak pcibios_default_alignment(void)
6256{
6257	return 0;
6258}
6259
6260/*
6261 * Arches that don't want to expose struct resource to userland as-is in
6262 * sysfs and /proc can implement their own pci_resource_to_user().
6263 */
6264void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
6265				 const struct resource *rsrc,
6266				 resource_size_t *start, resource_size_t *end)
6267{
6268	*start = rsrc->start;
6269	*end = rsrc->end;
6270}
6271
6272static char *resource_alignment_param;
6273static DEFINE_SPINLOCK(resource_alignment_lock);
6274
6275/**
6276 * pci_specified_resource_alignment - get resource alignment specified by user.
6277 * @dev: the PCI device to get
6278 * @resize: whether or not to change resources' size when reassigning alignment
6279 *
6280 * RETURNS: Resource alignment if it is specified.
6281 *          Zero if it is not specified.
6282 */
6283static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
6284							bool *resize)
6285{
6286	int align_order, count;
6287	resource_size_t align = pcibios_default_alignment();
6288	const char *p;
6289	int ret;
6290
6291	spin_lock(&resource_alignment_lock);
6292	p = resource_alignment_param;
6293	if (!p || !*p)
6294		goto out;
6295	if (pci_has_flag(PCI_PROBE_ONLY)) {
6296		align = 0;
6297		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
6298		goto out;
6299	}
6300
6301	while (*p) {
6302		count = 0;
6303		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
6304		    p[count] == '@') {
6305			p += count + 1;
6306			if (align_order > 63) {
6307				pr_err("PCI: Invalid requested alignment (order %d)\n",
6308				       align_order);
6309				align_order = PAGE_SHIFT;
6310			}
6311		} else {
6312			align_order = PAGE_SHIFT;
6313		}
6314
6315		ret = pci_dev_str_match(dev, p, &p);
6316		if (ret == 1) {
6317			*resize = true;
6318			align = 1ULL << align_order;
6319			break;
6320		} else if (ret < 0) {
6321			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
6322			       p);
6323			break;
6324		}
6325
6326		if (*p != ';' && *p != ',') {
6327			/* End of param or invalid format */
6328			break;
6329		}
6330		p++;
6331	}
6332out:
6333	spin_unlock(&resource_alignment_lock);
6334	return align;
6335}
6336
6337static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6338					   resource_size_t align, bool resize)
6339{
6340	struct resource *r = &dev->resource[bar];
6341	resource_size_t size;
6342
6343	if (!(r->flags & IORESOURCE_MEM))
6344		return;
6345
6346	if (r->flags & IORESOURCE_PCI_FIXED) {
6347		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6348			 bar, r, (unsigned long long)align);
6349		return;
6350	}
6351
6352	size = resource_size(r);
6353	if (size >= align)
6354		return;
6355
6356	/*
6357	 * Increase the alignment of the resource.  There are two ways we
6358	 * can do this:
6359	 *
6360	 * 1) Increase the size of the resource.  BARs are aligned on their
6361	 *    size, so when we reallocate space for this resource, we'll
6362	 *    allocate it with the larger alignment.  This also prevents
6363	 *    assignment of any other BARs inside the alignment region, so
6364	 *    if we're requesting page alignment, this means no other BARs
6365	 *    will share the page.
6366	 *
6367	 *    The disadvantage is that this makes the resource larger than
6368	 *    the hardware BAR, which may break drivers that compute things
6369	 *    based on the resource size, e.g., to find registers at a
6370	 *    fixed offset before the end of the BAR.
6371	 *
6372	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6373	 *    set r->start to the desired alignment.  By itself this
6374	 *    doesn't prevent other BARs being put inside the alignment
6375	 *    region, but if we realign *every* resource of every device in
6376	 *    the system, none of them will share an alignment region.
6377	 *
6378	 * When the user has requested alignment for only some devices via
6379	 * the "pci=resource_alignment" argument, "resize" is true and we
6380	 * use the first method.  Otherwise we assume we're aligning all
6381	 * devices and we use the second.
6382	 */
6383
6384	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6385		 bar, r, (unsigned long long)align);
6386
6387	if (resize) {
6388		r->start = 0;
6389		r->end = align - 1;
6390	} else {
6391		r->flags &= ~IORESOURCE_SIZEALIGN;
6392		r->flags |= IORESOURCE_STARTALIGN;
6393		r->start = align;
6394		r->end = r->start + size - 1;
6395	}
6396	r->flags |= IORESOURCE_UNSET;
6397}
6398
 6399/*
 6400 * This function disables memory decoding and releases memory resources
 6401 * of the device specified by the kernel's boot parameter
 6402 * 'pci=resource_alignment='.  It also rounds up size to the specified
 6403 * alignment.  Later on, the kernel will assign page-aligned memory
 6404 * resources back to the device.
 6405 */
6406void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6407{
6408	int i;
6409	struct resource *r;
6410	resource_size_t align;
6411	u16 command;
6412	bool resize = false;
6413
6414	/*
6415	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6416	 * 3.4.1.11.  Their resources are allocated from the space
6417	 * described by the VF BARx register in the PF's SR-IOV capability.
6418	 * We can't influence their alignment here.
6419	 */
6420	if (dev->is_virtfn)
6421		return;
6422
 6423	/* Check if the specified PCI device is a target to reassign */
6424	align = pci_specified_resource_alignment(dev, &resize);
6425	if (!align)
6426		return;
6427
6428	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6429	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6430		pci_warn(dev, "Can't reassign resources to host bridge\n");
6431		return;
6432	}
6433
6434	pci_read_config_word(dev, PCI_COMMAND, &command);
6435	command &= ~PCI_COMMAND_MEMORY;
6436	pci_write_config_word(dev, PCI_COMMAND, command);
6437
6438	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6439		pci_request_resource_alignment(dev, i, align, resize);
6440
 6441	/*
 6442	 * Need to disable the bridge's resource window to enable the
 6443	 * kernel to reassign a new resource window later on.
 6445	 */
6446	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6447		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6448			r = &dev->resource[i];
6449			if (!(r->flags & IORESOURCE_MEM))
6450				continue;
6451			r->flags |= IORESOURCE_UNSET;
6452			r->end = resource_size(r) - 1;
6453			r->start = 0;
6454		}
6455		pci_disable_bridge_window(dev);
6456	}
6457}
6458
6459static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6460{
6461	size_t count = 0;
6462
6463	spin_lock(&resource_alignment_lock);
6464	if (resource_alignment_param)
6465		count = sysfs_emit(buf, "%s\n", resource_alignment_param);
6466	spin_unlock(&resource_alignment_lock);
6467
6468	return count;
6469}
6470
6471static ssize_t resource_alignment_store(struct bus_type *bus,
6472					const char *buf, size_t count)
6473{
6474	char *param, *old, *end;
6475
6476	if (count >= (PAGE_SIZE - 1))
6477		return -EINVAL;
6478
6479	param = kstrndup(buf, count, GFP_KERNEL);
6480	if (!param)
6481		return -ENOMEM;
6482
6483	end = strchr(param, '\n');
6484	if (end)
6485		*end = '\0';
6486
6487	spin_lock(&resource_alignment_lock);
6488	old = resource_alignment_param;
6489	if (strlen(param)) {
6490		resource_alignment_param = param;
6491	} else {
6492		kfree(param);
6493		resource_alignment_param = NULL;
6494	}
6495	spin_unlock(&resource_alignment_lock);
6496
6497	kfree(old);
6498
6499	return count;
6500}
6501
6502static BUS_ATTR_RW(resource_alignment);
6503
6504static int __init pci_resource_alignment_sysfs_init(void)
6505{
6506	return bus_create_file(&pci_bus_type,
6507					&bus_attr_resource_alignment);
6508}
6509late_initcall(pci_resource_alignment_sysfs_init);
6510
6511static void pci_no_domains(void)
6512{
6513#ifdef CONFIG_PCI_DOMAINS
6514	pci_domains_supported = 0;
6515#endif
6516}
6517
6518#ifdef CONFIG_PCI_DOMAINS_GENERIC
6519static atomic_t __domain_nr = ATOMIC_INIT(-1);
6520
6521static int pci_get_new_domain_nr(void)
6522{
6523	return atomic_inc_return(&__domain_nr);
6524}
6525
6526static int of_pci_bus_find_domain_nr(struct device *parent)
6527{
6528	static int use_dt_domains = -1;
6529	int domain = -1;
6530
6531	if (parent)
6532		domain = of_get_pci_domain_nr(parent->of_node);
6533
6534	/*
6535	 * Check DT domain and use_dt_domains values.
6536	 *
6537	 * If DT domain property is valid (domain >= 0) and
6538	 * use_dt_domains != 0, the DT assignment is valid since this means
6539	 * we have not previously allocated a domain number by using
6540	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6541	 * 1, to indicate that we have just assigned a domain number from
6542	 * DT.
6543	 *
 6544	 * If DT domain property value is not valid (i.e., domain < 0), and we
6545	 * have not previously assigned a domain number from DT
 6546	 * (use_dt_domains != 1), we should assign a domain number by
 6547	 * using the pci_get_new_domain_nr() API and update the
 6548	 * use_dt_domains value to keep track of the method we are using
 6549	 * to assign domain numbers (use_dt_domains = 0).
6553	 *
6554	 * All other combinations imply we have a platform that is trying
6555	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
 6556	 * which is a recipe for domain mishandling; we prevent it by
6557	 * invalidating the domain value (domain = -1) and printing a
6558	 * corresponding error.
6559	 */
6560	if (domain >= 0 && use_dt_domains) {
6561		use_dt_domains = 1;
6562	} else if (domain < 0 && use_dt_domains != 1) {
6563		use_dt_domains = 0;
6564		domain = pci_get_new_domain_nr();
6565	} else {
6566		if (parent)
6567			pr_err("Node %pOF has ", parent->of_node);
6568		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6569		domain = -1;
6570	}
6571
6572	return domain;
6573}
6574
6575int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6576{
6577	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6578			       acpi_pci_bus_find_domain_nr(bus);
6579}
6580#endif
6581
6582/**
6583 * pci_ext_cfg_avail - can we access extended PCI config space?
6584 *
6585 * Returns 1 if we can access PCI extended config space (offsets
6586 * greater than 0xff). This is the default implementation. Architecture
6587 * implementations can override this.
6588 */
6589int __weak pci_ext_cfg_avail(void)
6590{
6591	return 1;
6592}
6593
6594void __weak pci_fixup_cardbus(struct pci_bus *bus)
6595{
6596}
6597EXPORT_SYMBOL(pci_fixup_cardbus);
6598
6599static int __init pci_setup(char *str)
6600{
6601	while (str) {
6602		char *k = strchr(str, ',');
6603		if (k)
6604			*k++ = 0;
6605		if (*str && (str = pcibios_setup(str)) && *str) {
6606			if (!strcmp(str, "nomsi")) {
6607				pci_no_msi();
6608			} else if (!strncmp(str, "noats", 5)) {
6609				pr_info("PCIe: ATS is disabled\n");
6610				pcie_ats_disabled = true;
6611			} else if (!strcmp(str, "noaer")) {
6612				pci_no_aer();
6613			} else if (!strcmp(str, "earlydump")) {
6614				pci_early_dump = true;
6615			} else if (!strncmp(str, "realloc=", 8)) {
6616				pci_realloc_get_opt(str + 8);
6617			} else if (!strncmp(str, "realloc", 7)) {
6618				pci_realloc_get_opt("on");
6619			} else if (!strcmp(str, "nodomains")) {
6620				pci_no_domains();
6621			} else if (!strncmp(str, "noari", 5)) {
6622				pcie_ari_disabled = true;
6623			} else if (!strncmp(str, "cbiosize=", 9)) {
6624				pci_cardbus_io_size = memparse(str + 9, &str);
6625			} else if (!strncmp(str, "cbmemsize=", 10)) {
6626				pci_cardbus_mem_size = memparse(str + 10, &str);
6627			} else if (!strncmp(str, "resource_alignment=", 19)) {
6628				resource_alignment_param = str + 19;
6629			} else if (!strncmp(str, "ecrc=", 5)) {
6630				pcie_ecrc_get_policy(str + 5);
6631			} else if (!strncmp(str, "hpiosize=", 9)) {
6632				pci_hotplug_io_size = memparse(str + 9, &str);
6633			} else if (!strncmp(str, "hpmmiosize=", 11)) {
6634				pci_hotplug_mmio_size = memparse(str + 11, &str);
6635			} else if (!strncmp(str, "hpmmioprefsize=", 15)) {
6636				pci_hotplug_mmio_pref_size = memparse(str + 15, &str);
6637			} else if (!strncmp(str, "hpmemsize=", 10)) {
6638				pci_hotplug_mmio_size = memparse(str + 10, &str);
6639				pci_hotplug_mmio_pref_size = pci_hotplug_mmio_size;
6640			} else if (!strncmp(str, "hpbussize=", 10)) {
6641				pci_hotplug_bus_size =
6642					simple_strtoul(str + 10, &str, 0);
6643				if (pci_hotplug_bus_size > 0xff)
6644					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6645			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6646				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6647			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6648				pcie_bus_config = PCIE_BUS_SAFE;
6649			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6650				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6651			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6652				pcie_bus_config = PCIE_BUS_PEER2PEER;
6653			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6654				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6655			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6656				disable_acs_redir_param = str + 18;
6657			} else {
6658				pr_err("PCI: Unknown option `%s'\n", str);
6659			}
6660		}
6661		str = k;
6662	}
6663	return 0;
6664}
6665early_param("pci", pci_setup);
6666
6667/*
6668 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6669 * in pci_setup(), above, to point to data in the __initdata section which
6670 * will be freed after the init sequence is complete. We can't allocate memory
6671 * in pci_setup() because some architectures do not have any memory allocation
 6672 * service available during an early_param() call.  So we allocate memory
 6673 * and copy the variables here before the init section is freed.
 6675 */
6676static int __init pci_realloc_setup_params(void)
6677{
6678	resource_alignment_param = kstrdup(resource_alignment_param,
6679					   GFP_KERNEL);
6680	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6681
6682	return 0;
6683}
6684pure_initcall(pci_realloc_setup_params);