   1/*
   2 *	PCI Bus Services, see include/linux/pci.h for further explanation.
   3 *
   4 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   5 *	David Mosberger-Tang
   6 *
   7 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
  12#include <linux/init.h>
  13#include <linux/pci.h>
  14#include <linux/pm.h>
  15#include <linux/slab.h>
  16#include <linux/module.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/log2.h>
  20#include <linux/pci-aspm.h>
  21#include <linux/pm_wakeup.h>
  22#include <linux/interrupt.h>
  23#include <linux/device.h>
  24#include <linux/pm_runtime.h>
  25#include <asm/setup.h>
  26#include "pci.h"
  27
  28const char *pci_power_names[] = {
  29	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  30};
  31EXPORT_SYMBOL_GPL(pci_power_names);
  32
  33int isa_dma_bridge_buggy;
  34EXPORT_SYMBOL(isa_dma_bridge_buggy);
  35
  36int pci_pci_problems;
  37EXPORT_SYMBOL(pci_pci_problems);
  38
  39unsigned int pci_pm_d3_delay;
  40
  41static void pci_pme_list_scan(struct work_struct *work);
  42
  43static LIST_HEAD(pci_pme_list);
  44static DEFINE_MUTEX(pci_pme_list_mutex);
  45static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  46
  47struct pci_pme_device {
  48	struct list_head list;
  49	struct pci_dev *dev;
  50};
  51
  52#define PME_TIMEOUT 1000 /* How long between PME checks */
  53
  54static void pci_dev_d3_sleep(struct pci_dev *dev)
  55{
  56	unsigned int delay = dev->d3_delay;
  57
  58	if (delay < pci_pm_d3_delay)
  59		delay = pci_pm_d3_delay;
  60
  61	msleep(delay);
  62}
  63
  64#ifdef CONFIG_PCI_DOMAINS
  65int pci_domains_supported = 1;
  66#endif
  67
  68#define DEFAULT_CARDBUS_IO_SIZE		(256)
  69#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  70/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  71unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  72unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  73
  74#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  75#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
  76/* pci=hpmemsize=nnM,hpiosize=nn can override this */
  77unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
  79
  80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
  81
  82/*
  83 * The default CLS is used if the arch didn't set CLS explicitly and not
  84 * all PCI devices agree on the same value.  The arch can override either
  85 * the default or the actual value as it sees fit.  Don't forget this is
  86 * measured in 32-bit words, not bytes.
  87 */
  88u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
  89u8 pci_cache_line_size;
  90
  91/**
  92 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  93 * @bus: pointer to PCI bus structure to search
  94 *
  95 * Given a PCI bus, returns the highest PCI bus number present in the set
  96 * including the given PCI bus and its list of child PCI buses.
  97 */
  98unsigned char pci_bus_max_busnr(struct pci_bus* bus)
  99{
 100	struct list_head *tmp;
 101	unsigned char max, n;
 102
 103	max = bus->subordinate;
 104	list_for_each(tmp, &bus->children) {
 105		n = pci_bus_max_busnr(pci_bus_b(tmp));
 106		if(n > max)
 107			max = n;
 108	}
 109	return max;
 110}
 111EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 112
 113#ifdef CONFIG_HAS_IOMEM
 114void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 115{
 116	/*
 117	 * Make sure the BAR is actually a memory resource, not an IO resource
 118	 */
 119	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 120		WARN_ON(1);
 121		return NULL;
 122	}
 123	return ioremap_nocache(pci_resource_start(pdev, bar),
 124				     pci_resource_len(pdev, bar));
 125}
 126EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 127#endif
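/*
 * Illustrative sketch (editor's addition, not from the original file):
 * how a driver might map a memory BAR with pci_ioremap_bar().  The
 * foo_* name and the choice of BAR 0 are assumptions for the example.
 */
#if 0
static int foo_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs;

	/* BAR 0 must be a memory BAR; pci_ioremap_bar() returns NULL otherwise */
	regs = pci_ioremap_bar(pdev, 0);
	if (!regs)
		return -ENOMEM;

	/* ... access device registers with readl()/writel() on regs ... */

	iounmap(regs);
	return 0;
}
#endif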
 128
 129#if 0
 130/**
 131 * pci_max_busnr - returns maximum PCI bus number
 132 *
 133 * Returns the highest PCI bus number present in the system global list of
 134 * PCI buses.
 135 */
 136unsigned char __devinit
 137pci_max_busnr(void)
 138{
 139	struct pci_bus *bus = NULL;
 140	unsigned char max, n;
 141
 142	max = 0;
 143	while ((bus = pci_find_next_bus(bus)) != NULL) {
 144		n = pci_bus_max_busnr(bus);
 145		if(n > max)
 146			max = n;
 147	}
 148	return max;
 149}
 150
 151#endif  /*  0  */
 152
 153#define PCI_FIND_CAP_TTL	48
 154
 155static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 156				   u8 pos, int cap, int *ttl)
 157{
 158	u8 id;
 159
 160	while ((*ttl)--) {
 161		pci_bus_read_config_byte(bus, devfn, pos, &pos);
 162		if (pos < 0x40)
 163			break;
 164		pos &= ~3;
 165		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
 166					 &id);
 167		if (id == 0xff)
 168			break;
 169		if (id == cap)
 170			return pos;
 171		pos += PCI_CAP_LIST_NEXT;
 172	}
 173	return 0;
 174}
 175
 176static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 177			       u8 pos, int cap)
 178{
 179	int ttl = PCI_FIND_CAP_TTL;
 180
 181	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 182}
 183
 184int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 185{
 186	return __pci_find_next_cap(dev->bus, dev->devfn,
 187				   pos + PCI_CAP_LIST_NEXT, cap);
 188}
 189EXPORT_SYMBOL_GPL(pci_find_next_capability);
 190
 191static int __pci_bus_find_cap_start(struct pci_bus *bus,
 192				    unsigned int devfn, u8 hdr_type)
 193{
 194	u16 status;
 195
 196	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 197	if (!(status & PCI_STATUS_CAP_LIST))
 198		return 0;
 199
 200	switch (hdr_type) {
 201	case PCI_HEADER_TYPE_NORMAL:
 202	case PCI_HEADER_TYPE_BRIDGE:
 203		return PCI_CAPABILITY_LIST;
 204	case PCI_HEADER_TYPE_CARDBUS:
 205		return PCI_CB_CAPABILITY_LIST;
 206	default:
 207		return 0;
 208	}
 209
 210	return 0;
 211}
 212
 213/**
 214 * pci_find_capability - query for a device's capabilities
 215 * @dev: PCI device to query
 216 * @cap: capability code
 217 *
 218 * Tell if a device supports a given PCI capability.
 219 * Returns the address of the requested capability structure within the
 220 * device's PCI configuration space or 0 in case the device does not
 221 * support it.  Possible values for @cap:
 222 *
 223 *  %PCI_CAP_ID_PM           Power Management 
 224 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port 
 225 *  %PCI_CAP_ID_VPD          Vital Product Data 
 226 *  %PCI_CAP_ID_SLOTID       Slot Identification 
 227 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 228 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap 
 229 *  %PCI_CAP_ID_PCIX         PCI-X
 230 *  %PCI_CAP_ID_EXP          PCI Express
 231 */
 232int pci_find_capability(struct pci_dev *dev, int cap)
 233{
 234	int pos;
 235
 236	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 237	if (pos)
 238		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 239
 240	return pos;
 241}
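/*
 * Illustrative sketch (editor's addition, not from the original file):
 * locating a capability and reading one of its registers.  The Power
 * Management capability and the foo_* name are just examples.
 */
#if 0
static void foo_show_pm_cap(struct pci_dev *dev)
{
	int pos;
	u16 pmc;

	pos = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pos)
		return;		/* device has no PM capability */

	/* capability registers live at fixed offsets from @pos */
	pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
	dev_info(&dev->dev, "PM cap at %#x, PMC = %#06x\n", pos, pmc);
}
#endif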
 242
 243/**
 244 * pci_bus_find_capability - query for a device's capabilities
 245 * @bus:   the PCI bus to query
 246 * @devfn: PCI device to query
 247 * @cap:   capability code
 248 *
 249 * Like pci_find_capability() but works for pci devices that do not have a
 250 * pci_dev structure set up yet. 
 251 *
 252 * Returns the address of the requested capability structure within the
 253 * device's PCI configuration space or 0 in case the device does not
 254 * support it.
 255 */
 256int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 257{
 258	int pos;
 259	u8 hdr_type;
 260
 261	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 262
 263	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 264	if (pos)
 265		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 266
 267	return pos;
 268}
 269
 270/**
 271 * pci_find_ext_capability - Find an extended capability
 272 * @dev: PCI device to query
 273 * @cap: capability code
 274 *
 275 * Returns the address of the requested extended capability structure
 276 * within the device's PCI configuration space or 0 if the device does
 277 * not support it.  Possible values for @cap:
 278 *
 279 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 280 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 281 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 282 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 283 */
 284int pci_find_ext_capability(struct pci_dev *dev, int cap)
 285{
 286	u32 header;
 287	int ttl;
 288	int pos = PCI_CFG_SPACE_SIZE;
 289
 290	/* minimum 8 bytes per capability */
 291	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 292
 293	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 294		return 0;
 295
 296	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 297		return 0;
 298
 299	/*
 300	 * If we have no capabilities, this is indicated by cap ID,
 301	 * cap version and next pointer all being 0.
 302	 */
 303	if (header == 0)
 304		return 0;
 305
 306	while (ttl-- > 0) {
 307		if (PCI_EXT_CAP_ID(header) == cap)
 308			return pos;
 309
 310		pos = PCI_EXT_CAP_NEXT(header);
 311		if (pos < PCI_CFG_SPACE_SIZE)
 312			break;
 313
 314		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 315			break;
 316	}
 317
 318	return 0;
 319}
 320EXPORT_SYMBOL_GPL(pci_find_ext_capability);
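/*
 * Illustrative sketch (editor's addition, not from the original file):
 * checking for an extended capability, here Advanced Error Reporting.
 * The foo_* name is hypothetical.
 */
#if 0
static bool foo_has_aer(struct pci_dev *dev)
{
	/* extended capabilities sit above the first 256 config bytes */
	return pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif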
 321
 322/**
 323 * pci_bus_find_ext_capability - find an extended capability
 324 * @bus:   the PCI bus to query
 325 * @devfn: PCI device to query
 326 * @cap:   capability code
 327 *
 328 * Like pci_find_ext_capability() but works for pci devices that do not have a
 329 * pci_dev structure set up yet.
 330 *
 331 * Returns the address of the requested capability structure within the
 332 * device's PCI configuration space or 0 in case the device does not
 333 * support it.
 334 */
 335int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
 336				int cap)
 337{
 338	u32 header;
 339	int ttl;
 340	int pos = PCI_CFG_SPACE_SIZE;
 341
 342	/* minimum 8 bytes per capability */
 343	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 344
 345	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 346		return 0;
 347	if (header == 0xffffffff || header == 0)
 348		return 0;
 349
 350	while (ttl-- > 0) {
 351		if (PCI_EXT_CAP_ID(header) == cap)
 352			return pos;
 353
 354		pos = PCI_EXT_CAP_NEXT(header);
 355		if (pos < PCI_CFG_SPACE_SIZE)
 356			break;
 357
 358		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 359			break;
 360	}
 361
 362	return 0;
 363}
 364
 365static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 366{
 367	int rc, ttl = PCI_FIND_CAP_TTL;
 368	u8 cap, mask;
 369
 370	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 371		mask = HT_3BIT_CAP_MASK;
 372	else
 373		mask = HT_5BIT_CAP_MASK;
 374
 375	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 376				      PCI_CAP_ID_HT, &ttl);
 377	while (pos) {
 378		rc = pci_read_config_byte(dev, pos + 3, &cap);
 379		if (rc != PCIBIOS_SUCCESSFUL)
 380			return 0;
 381
 382		if ((cap & mask) == ht_cap)
 383			return pos;
 384
 385		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 386					      pos + PCI_CAP_LIST_NEXT,
 387					      PCI_CAP_ID_HT, &ttl);
 388	}
 389
 390	return 0;
 391}
 392/**
 393 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 394 * @dev: PCI device to query
 395 * @pos: Position from which to continue searching
 396 * @ht_cap: Hypertransport capability code
 397 *
 398 * To be used in conjunction with pci_find_ht_capability() to search for
 399 * all capabilities matching @ht_cap. @pos should always be a value returned
 400 * from pci_find_ht_capability().
 401 *
 402 * NB. To be 100% safe against broken PCI devices, the caller should take
 403 * steps to avoid an infinite loop.
 404 */
 405int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 406{
 407	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 408}
 409EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 410
 411/**
 412 * pci_find_ht_capability - query a device's Hypertransport capabilities
 413 * @dev: PCI device to query
 414 * @ht_cap: Hypertransport capability code
 415 *
 416 * Tell if a device supports a given Hypertransport capability.
 417 * Returns an address within the device's PCI configuration space
 418 * or 0 in case the device does not support the requested capability.
 419 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 420 * which has a Hypertransport capability matching @ht_cap.
 421 */
 422int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 423{
 424	int pos;
 425
 426	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 427	if (pos)
 428		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 429
 430	return pos;
 431}
 432EXPORT_SYMBOL_GPL(pci_find_ht_capability);
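/*
 * Illustrative sketch (editor's addition, not from the original file):
 * walking all HyperTransport capabilities of one type, with a bounded
 * loop as the kernel-doc above recommends.  The foo_* name and the
 * MSI-mapping capability type are just examples.
 */
#if 0
static void foo_walk_ht_msi_caps(struct pci_dev *dev)
{
	int pos, guard = PCI_FIND_CAP_TTL;	/* guard against broken lists */

	pos = pci_find_ht_capability(dev, HT_CAPTYPE_MSI_MAPPING);
	while (pos && guard--) {
		dev_info(&dev->dev, "HT MSI mapping cap at %#x\n", pos);
		pos = pci_find_next_ht_capability(dev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}
#endif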
 433
 434/**
 435 * pci_find_parent_resource - return resource region of parent bus of given region
 436 * @dev: PCI device structure contains resources to be searched
 437 * @res: child resource record for which parent is sought
 438 *
 439 *  For given resource region of given device, return the resource
 440 *  region of parent bus the given region is contained in or where
 441 *  it should be allocated from.
 442 */
 443struct resource *
 444pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 445{
 446	const struct pci_bus *bus = dev->bus;
 447	int i;
 448	struct resource *best = NULL, *r;
 449
 450	pci_bus_for_each_resource(bus, r, i) {
 451		if (!r)
 452			continue;
 453		if (res->start && !(res->start >= r->start && res->end <= r->end))
 454			continue;	/* Not contained */
 455		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
 456			continue;	/* Wrong type */
 457		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
 458			return r;	/* Exact match */
 459		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
 460		if (r->flags & IORESOURCE_PREFETCH)
 461			continue;
 462		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
 463		if (!best)
 464			best = r;
 465	}
 466	return best;
 467}
 468
 469/**
 470 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 471 * @dev: PCI device to have its BARs restored
 472 *
 473 * Restore the BAR values for a given device, so as to make it
 474 * accessible by its driver.
 475 */
 476static void
 477pci_restore_bars(struct pci_dev *dev)
 478{
 479	int i;
 480
 481	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 482		pci_update_resource(dev, i);
 483}
 484
 485static struct pci_platform_pm_ops *pci_platform_pm;
 486
 487int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
 488{
 489	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
 490	    || !ops->sleep_wake || !ops->can_wakeup)
 491		return -EINVAL;
 492	pci_platform_pm = ops;
 493	return 0;
 494}
 495
 496static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 497{
 498	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 499}
 500
 501static inline int platform_pci_set_power_state(struct pci_dev *dev,
 502                                                pci_power_t t)
 503{
 504	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 505}
 506
 507static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 508{
 509	return pci_platform_pm ?
 510			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 511}
 512
 513static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
 514{
 515	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
 516}
 517
 518static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 519{
 520	return pci_platform_pm ?
 521			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 522}
 523
 524static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
 525{
 526	return pci_platform_pm ?
 527			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 528}
 529
 530/**
 531 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 532 *                           given PCI device
 533 * @dev: PCI device to handle.
 534 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 535 *
 536 * RETURN VALUE:
 537 * -EINVAL if the requested state is invalid.
 538 * -EIO if device does not support PCI PM or its PM capabilities register has a
 539 * wrong version, or device doesn't support the requested state.
 540 * 0 if device already is in the requested state.
 541 * 0 if device's power state has been successfully changed.
 542 */
 543static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 544{
 545	u16 pmcsr;
 546	bool need_restore = false;
 547
 548	/* Check if we're already there */
 549	if (dev->current_state == state)
 550		return 0;
 551
 552	if (!dev->pm_cap)
 553		return -EIO;
 554
 555	if (state < PCI_D0 || state > PCI_D3hot)
 556		return -EINVAL;
 557
 558	/* Validate current state:
 559	 * we can enter D0 from any state, but apart from that we can
 560	 * only move to a deeper low-power state, never a shallower one
 561	 */
 562	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 563	    && dev->current_state > state) {
 564		dev_err(&dev->dev, "invalid power transition "
 565			"(from state %d to %d)\n", dev->current_state, state);
 566		return -EINVAL;
 567	}
 568
 569	/* check if this device supports the desired state */
 570	if ((state == PCI_D1 && !dev->d1_support)
 571	   || (state == PCI_D2 && !dev->d2_support))
 572		return -EIO;
 573
 574	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 575
 576	/* If we're (effectively) in D3, force entire word to 0.
 577	 * This doesn't affect PME_Status, disables PME_En, and
 578	 * sets PowerState to 0.
 579	 */
 580	switch (dev->current_state) {
 581	case PCI_D0:
 582	case PCI_D1:
 583	case PCI_D2:
 584		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 585		pmcsr |= state;
 586		break;
 587	case PCI_D3hot:
 588	case PCI_D3cold:
 589	case PCI_UNKNOWN: /* Boot-up */
 590		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 591		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 592			need_restore = true;
 593		/* Fall-through: force to D0 */
 594	default:
 595		pmcsr = 0;
 596		break;
 597	}
 598
 599	/* enter specified state */
 600	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 601
 602	/* Mandatory power management transition delays */
 603	/* see PCI PM 1.1 5.6.1 table 18 */
 604	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 605		pci_dev_d3_sleep(dev);
 606	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 607		udelay(PCI_PM_D2_DELAY);
 608
 609	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 610	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 611	if (dev->current_state != state && printk_ratelimit())
 612		dev_info(&dev->dev, "Refused to change power state, "
 613			"currently in D%d\n", dev->current_state);
 614
 615	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 616	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 617	 * from D3hot to D0 _may_ perform an internal reset, thereby
 618	 * going to "D0 Uninitialized" rather than "D0 Initialized".
 619	 * For example, at least some versions of the 3c905B and the
 620	 * 3c556B exhibit this behaviour.
 621	 *
 622	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
 623	 * devices in a D3hot state at boot.  Consequently, we need to
 624	 * restore at least the BARs so that the device will be
 625	 * accessible to its driver.
 626	 */
 627	if (need_restore)
 628		pci_restore_bars(dev);
 629
 630	if (dev->bus->self)
 631		pcie_aspm_pm_state_change(dev->bus->self);
 632
 633	return 0;
 634}
 635
 636/**
 637 * pci_update_current_state - Read PCI power state of given device from its
 638 *                            PCI PM registers and cache it
 639 * @dev: PCI device to handle.
 640 * @state: State to cache in case the device doesn't have the PM capability
 641 */
 642void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 643{
 644	if (dev->pm_cap) {
 645		u16 pmcsr;
 646
 647		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 648		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 649	} else {
 650		dev->current_state = state;
 651	}
 652}
 653
 654/**
 655 * pci_platform_power_transition - Use platform to change device power state
 656 * @dev: PCI device to handle.
 657 * @state: State to put the device into.
 658 */
 659static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 660{
 661	int error;
 662
 663	if (platform_pci_power_manageable(dev)) {
 664		error = platform_pci_set_power_state(dev, state);
 665		if (!error)
 666			pci_update_current_state(dev, state);
 667	} else {
 668		error = -ENODEV;
 669		/* Fall back to PCI_D0 if native PM is not supported */
 670		if (!dev->pm_cap)
 671			dev->current_state = PCI_D0;
 672	}
 673
 674	return error;
 675}
 676
 677/**
 678 * __pci_start_power_transition - Start power transition of a PCI device
 679 * @dev: PCI device to handle.
 680 * @state: State to put the device into.
 681 */
 682static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 683{
 684	if (state == PCI_D0)
 685		pci_platform_power_transition(dev, PCI_D0);
 686}
 687
 688/**
 689 * __pci_complete_power_transition - Complete power transition of a PCI device
 690 * @dev: PCI device to handle.
 691 * @state: State to put the device into.
 692 *
 693 * This function should not be called directly by device drivers.
 694 */
 695int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 696{
 697	return state >= PCI_D0 ?
 698			pci_platform_power_transition(dev, state) : -EINVAL;
 699}
 700EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 701
 702/**
 703 * pci_set_power_state - Set the power state of a PCI device
 704 * @dev: PCI device to handle.
 705 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 706 *
 707 * Transition a device to a new power state, using the platform firmware and/or
 708 * the device's PCI PM registers.
 709 *
 710 * RETURN VALUE:
 711 * -EINVAL if the requested state is invalid.
 712 * -EIO if device does not support PCI PM or its PM capabilities register has a
 713 * wrong version, or device doesn't support the requested state.
 714 * 0 if device already is in the requested state.
 715 * 0 if device's power state has been successfully changed.
 716 */
 717int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 718{
 719	int error;
 720
 721	/* bound the state we're entering */
 722	if (state > PCI_D3hot)
 723		state = PCI_D3hot;
 724	else if (state < PCI_D0)
 725		state = PCI_D0;
 726	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
 727		/*
 728		 * If the device or the parent bridge do not support PCI PM,
 729		 * ignore the request if we're doing anything other than putting
 730		 * it into D0 (which would only happen on boot).
 731		 */
 732		return 0;
 733
 734	__pci_start_power_transition(dev, state);
 735
 736	/* This device is quirked not to be put into D3, so
 737	   don't put it in D3 */
 738	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 739		return 0;
 740
 741	error = pci_raw_set_power_state(dev, state);
 742
 743	if (!__pci_complete_power_transition(dev, state))
 744		error = 0;
 745	/*
 746	 * When aspm_policy is "powersave" this call ensures
 747	 * that ASPM is configured.
 748	 */
 749	if (!error && dev->bus->self)
 750		pcie_aspm_powersave_config_link(dev->bus->self);
 751
 752	return error;
 753}
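/*
 * Illustrative sketch (editor's addition, not from the original file):
 * a direct D3hot/D0 round trip.  Most device context is lost in D3hot,
 * so real drivers save and restore state around this (see
 * pci_save_state()/pci_restore_state() below).
 */
#if 0
static void foo_power_cycle(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D3hot);	/* device context is lost */
	pci_set_power_state(pdev, PCI_D0);	/* back to fully operational */
}
#endif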
 754
 755/**
 756 * pci_choose_state - Choose the power state of a PCI device
 757 * @dev: PCI device to be suspended
 758 * @state: target sleep state for the whole system. This is the value
 759 *	that is passed to suspend() function.
 760 *
 761 * Returns PCI power state suitable for given device and given system
 762 * message.
 763 */
 764
 765pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 766{
 767	pci_power_t ret;
 768
 769	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
 770		return PCI_D0;
 771
 772	ret = platform_pci_choose_state(dev);
 773	if (ret != PCI_POWER_ERROR)
 774		return ret;
 775
 776	switch (state.event) {
 777	case PM_EVENT_ON:
 778		return PCI_D0;
 779	case PM_EVENT_FREEZE:
 780	case PM_EVENT_PRETHAW:
 781		/* REVISIT both freeze and pre-thaw "should" use D0 */
 782	case PM_EVENT_SUSPEND:
 783	case PM_EVENT_HIBERNATE:
 784		return PCI_D3hot;
 785	default:
 786		dev_info(&dev->dev, "unrecognized suspend event %d\n",
 787			 state.event);
 788		BUG();
 789	}
 790	return PCI_D0;
 791}
 792
 793EXPORT_SYMBOL(pci_choose_state);
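/*
 * Illustrative sketch (editor's addition, not from the original file):
 * the classic legacy suspend pattern, letting pci_choose_state() map the
 * system sleep state to a device power state.  foo_* is hypothetical.
 */
#if 0
static int foo_legacy_suspend(struct pci_dev *pdev, pm_message_t state)
{
	/* quiesce the device first, then save config space and power down */
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif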
 794
 795#define PCI_EXP_SAVE_REGS	7
 796
 797#define pcie_cap_has_devctl(type, flags)	1
 798#define pcie_cap_has_lnkctl(type, flags)		\
 799		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 800		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 801		  type == PCI_EXP_TYPE_ENDPOINT ||	\
 802		  type == PCI_EXP_TYPE_LEG_END))
 803#define pcie_cap_has_sltctl(type, flags)		\
 804		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 805		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
 806		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
 807		   (flags & PCI_EXP_FLAGS_SLOT))))
 808#define pcie_cap_has_rtctl(type, flags)			\
 809		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 810		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 811		  type == PCI_EXP_TYPE_RC_EC))
 812#define pcie_cap_has_devctl2(type, flags)		\
 813		((flags & PCI_EXP_FLAGS_VERS) > 1)
 814#define pcie_cap_has_lnkctl2(type, flags)		\
 815		((flags & PCI_EXP_FLAGS_VERS) > 1)
 816#define pcie_cap_has_sltctl2(type, flags)		\
 817		((flags & PCI_EXP_FLAGS_VERS) > 1)
 818
 819static int pci_save_pcie_state(struct pci_dev *dev)
 820{
 821	int pos, i = 0;
 822	struct pci_cap_saved_state *save_state;
 823	u16 *cap;
 824	u16 flags;
 825
 826	pos = pci_pcie_cap(dev);
 827	if (!pos)
 828		return 0;
 829
 830	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 831	if (!save_state) {
 832		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 833		return -ENOMEM;
 834	}
 835	cap = (u16 *)&save_state->cap.data[0];
 836
 837	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 838
 839	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 840		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
 841	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 842		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
 843	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 844		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
 845	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 846		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
 847	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 848		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
 849	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 850		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
 851	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 852		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
 853
 854	return 0;
 855}
 856
 857static void pci_restore_pcie_state(struct pci_dev *dev)
 858{
 859	int i = 0, pos;
 860	struct pci_cap_saved_state *save_state;
 861	u16 *cap;
 862	u16 flags;
 863
 864	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 865	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 866	if (!save_state || pos <= 0)
 867		return;
 868	cap = (u16 *)&save_state->cap.data[0];
 869
 870	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 871
 872	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 873		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
 874	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 875		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
 876	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 877		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
 878	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 879		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
 880	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 881		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
 882	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 883		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
 884	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 885		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
 886}
 887
 888
 889static int pci_save_pcix_state(struct pci_dev *dev)
 890{
 891	int pos;
 892	struct pci_cap_saved_state *save_state;
 893
 894	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 895	if (pos <= 0)
 896		return 0;
 897
 898	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 899	if (!save_state) {
 900		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 901		return -ENOMEM;
 902	}
 903
 904	pci_read_config_word(dev, pos + PCI_X_CMD,
 905			     (u16 *)save_state->cap.data);
 906
 907	return 0;
 908}
 909
 910static void pci_restore_pcix_state(struct pci_dev *dev)
 911{
 912	int i = 0, pos;
 913	struct pci_cap_saved_state *save_state;
 914	u16 *cap;
 915
 916	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 917	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 918	if (!save_state || pos <= 0)
 919		return;
 920	cap = (u16 *)&save_state->cap.data[0];
 921
 922	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
 923}
 924
 925
 926/**
 927 * pci_save_state - save the PCI configuration space of a device before suspending
 928 * @dev: PCI device that we're dealing with
 929 */
 930int
 931pci_save_state(struct pci_dev *dev)
 932{
 933	int i;
 934	/* XXX: 100% dword access ok here? */
 935	for (i = 0; i < 16; i++)
 936		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 937	dev->state_saved = true;
 938	if ((i = pci_save_pcie_state(dev)) != 0)
 939		return i;
 940	if ((i = pci_save_pcix_state(dev)) != 0)
 941		return i;
 942	return 0;
 943}
 944
 945/** 
 946 * pci_restore_state - Restore the saved state of a PCI device
 947 * @dev: PCI device that we're dealing with
 948 */
 949void pci_restore_state(struct pci_dev *dev)
 950{
 951	int i;
 952	u32 val;
 953
 954	if (!dev->state_saved)
 955		return;
 956
 957	/* PCI Express register must be restored first */
 958	pci_restore_pcie_state(dev);
 959
 960	/*
 961	 * The Base Address register should be programmed before the command
 962	 * register(s)
 963	 */
 964	for (i = 15; i >= 0; i--) {
 965		pci_read_config_dword(dev, i * 4, &val);
 966		if (val != dev->saved_config_space[i]) {
 967			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
 968				"space at offset %#x (was %#x, writing %#x)\n",
 969				i, val, (int)dev->saved_config_space[i]);
 970			pci_write_config_dword(dev,i * 4,
 971				dev->saved_config_space[i]);
 972		}
 973	}
 974	pci_restore_pcix_state(dev);
 975	pci_restore_msi_state(dev);
 976	pci_restore_iov_state(dev);
 977
 978	dev->state_saved = false;
 979}
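/*
 * Illustrative sketch (editor's addition, not from the original file):
 * the matching resume path: return to D0 before touching config space,
 * then restore what pci_save_state() captured.  foo_* is hypothetical.
 */
#if 0
static int foo_legacy_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* no-op unless state was saved */
	return pci_enable_device(pdev);
}
#endif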
 980
 981struct pci_saved_state {
 982	u32 config_space[16];
 983	struct pci_cap_saved_data cap[0];
 984};
 985
 986/**
 987 * pci_store_saved_state - Allocate and return an opaque struct containing
 988 *			   the device's saved state.
 989 * @dev: PCI device that we're dealing with
 990 *
 991 * Return NULL if there is no saved state or on error.
 992 */
 993struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
 994{
 995	struct pci_saved_state *state;
 996	struct pci_cap_saved_state *tmp;
 997	struct pci_cap_saved_data *cap;
 998	struct hlist_node *pos;
 999	size_t size;
1000
1001	if (!dev->state_saved)
1002		return NULL;
1003
1004	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1005
1006	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1007		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1008
1009	state = kzalloc(size, GFP_KERNEL);
1010	if (!state)
1011		return NULL;
1012
1013	memcpy(state->config_space, dev->saved_config_space,
1014	       sizeof(state->config_space));
1015
1016	cap = state->cap;
1017	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1018		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1019		memcpy(cap, &tmp->cap, len);
1020		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1021	}
1022	/* Empty cap_save terminates list */
1023
1024	return state;
1025}
1026EXPORT_SYMBOL_GPL(pci_store_saved_state);
1027
1028/**
1029 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1030 * @dev: PCI device that we're dealing with
1031 * @state: Saved state returned from pci_store_saved_state()
1032 */
1033int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1034{
1035	struct pci_cap_saved_data *cap;
1036
1037	dev->state_saved = false;
1038
1039	if (!state)
1040		return 0;
1041
1042	memcpy(dev->saved_config_space, state->config_space,
1043	       sizeof(state->config_space));
1044
1045	cap = state->cap;
1046	while (cap->size) {
1047		struct pci_cap_saved_state *tmp;
1048
1049		tmp = pci_find_saved_cap(dev, cap->cap_nr);
1050		if (!tmp || tmp->cap.size != cap->size)
1051			return -EINVAL;
1052
1053		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1054		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1055		       sizeof(struct pci_cap_saved_data) + cap->size);
1056	}
1057
1058	dev->state_saved = true;
1059	return 0;
1060}
1061EXPORT_SYMBOL_GPL(pci_load_saved_state);
1062
1063/**
1064 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1065 *				   and free the memory allocated for it.
1066 * @dev: PCI device that we're dealing with
1067 * @state: Pointer to saved state returned from pci_store_saved_state()
1068 */
1069int pci_load_and_free_saved_state(struct pci_dev *dev,
1070				  struct pci_saved_state **state)
1071{
1072	int ret = pci_load_saved_state(dev, *state);
1073	kfree(*state);
1074	*state = NULL;
1075	return ret;
1076}
1077EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
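/*
 * Illustrative sketch (editor's addition, not from the original file):
 * snapshotting device state across an operation that clobbers it (e.g.
 * a reset), using the opaque pci_saved_state helpers above.  foo_* is
 * hypothetical.
 */
#if 0
static int foo_reset_keeping_state(struct pci_dev *pdev)
{
	struct pci_saved_state *state;
	int ret;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* heap-allocated snapshot */

	/* ... reset or otherwise disturb the device here ... */

	ret = pci_load_and_free_saved_state(pdev, &state);
	if (ret)
		return ret;
	pci_restore_state(pdev);
	return 0;
}
#endif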
1078
1079static int do_pci_enable_device(struct pci_dev *dev, int bars)
1080{
1081	int err;
1082
1083	err = pci_set_power_state(dev, PCI_D0);
1084	if (err < 0 && err != -EIO)
1085		return err;
1086	err = pcibios_enable_device(dev, bars);
1087	if (err < 0)
1088		return err;
1089	pci_fixup_device(pci_fixup_enable, dev);
1090
1091	return 0;
1092}
1093
1094/**
1095 * pci_reenable_device - Resume abandoned device
1096 * @dev: PCI device to be resumed
1097 *
1098 *  Note this function is a backend of pci_default_resume and is not supposed
1099 *  to be called by normal code; write a proper resume handler and use that instead.
1100 */
1101int pci_reenable_device(struct pci_dev *dev)
1102{
1103	if (pci_is_enabled(dev))
1104		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1105	return 0;
1106}
1107
1108static int __pci_enable_device_flags(struct pci_dev *dev,
1109				     resource_size_t flags)
1110{
1111	int err;
1112	int i, bars = 0;
1113
1114	/*
1115	 * Power state could be unknown at this point, either due to a fresh
1116	 * boot or a device removal call.  So get the current power state
1117	 * so that things like MSI message writing will behave as expected
1118	 * (e.g. if the device really is in D0 at enable time).
1119	 */
1120	if (dev->pm_cap) {
1121		u16 pmcsr;
1122		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1123		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1124	}
1125
1126	if (atomic_add_return(1, &dev->enable_cnt) > 1)
1127		return 0;		/* already enabled */
1128
1129	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1130		if (dev->resource[i].flags & flags)
1131			bars |= (1 << i);
1132
1133	err = do_pci_enable_device(dev, bars);
1134	if (err < 0)
1135		atomic_dec(&dev->enable_cnt);
1136	return err;
1137}
1138
1139/**
1140 * pci_enable_device_io - Initialize a device for use with IO space
1141 * @dev: PCI device to be initialized
1142 *
1143 *  Initialize device before it's used by a driver. Ask low-level code
1144 *  to enable I/O resources. Wake up the device if it was suspended.
1145 *  Beware, this function can fail.
1146 */
1147int pci_enable_device_io(struct pci_dev *dev)
1148{
1149	return __pci_enable_device_flags(dev, IORESOURCE_IO);
1150}
1151
1152/**
1153 * pci_enable_device_mem - Initialize a device for use with Memory space
1154 * @dev: PCI device to be initialized
1155 *
1156 *  Initialize device before it's used by a driver. Ask low-level code
1157 *  to enable Memory resources. Wake up the device if it was suspended.
1158 *  Beware, this function can fail.
1159 */
1160int pci_enable_device_mem(struct pci_dev *dev)
1161{
1162	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1163}
1164
1165/**
1166 * pci_enable_device - Initialize device before it's used by a driver.
1167 * @dev: PCI device to be initialized
1168 *
1169 *  Initialize device before it's used by a driver. Ask low-level code
1170 *  to enable I/O and memory. Wake up the device if it was suspended.
1171 *  Beware, this function can fail.
1172 *
1173 *  Note we don't actually enable the device many times if we call
1174 *  this function repeatedly (we just increment the count).
1175 */
1176int pci_enable_device(struct pci_dev *dev)
1177{
1178	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1179}
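/*
 * Illustrative sketch (editor's addition, not from the original file):
 * pairing pci_enable_device() with pci_disable_device().  foo_* is
 * hypothetical; pci_set_master() is shown because DMA-capable drivers
 * usually need it right after enabling the device.
 */
#if 0
static int foo_start(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	pci_set_master(pdev);	/* enable bus mastering for DMA */
	return 0;
}

static void foo_stop(struct pci_dev *pdev)
{
	/* each successful pci_enable_device() needs a matching disable */
	pci_disable_device(pdev);
}
#endif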
1180
1181/*
1182 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1183 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1184 * there's no need to track it separately.  pci_devres is initialized
1185 * when a device is enabled using managed PCI device enable interface.
1186 */
1187struct pci_devres {
1188	unsigned int enabled:1;
1189	unsigned int pinned:1;
1190	unsigned int orig_intx:1;
1191	unsigned int restore_intx:1;
1192	u32 region_mask;
1193};
1194
1195static void pcim_release(struct device *gendev, void *res)
1196{
1197	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1198	struct pci_devres *this = res;
1199	int i;
1200
1201	if (dev->msi_enabled)
1202		pci_disable_msi(dev);
1203	if (dev->msix_enabled)
1204		pci_disable_msix(dev);
1205
1206	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1207		if (this->region_mask & (1 << i))
1208			pci_release_region(dev, i);
1209
1210	if (this->restore_intx)
1211		pci_intx(dev, this->orig_intx);
1212
1213	if (this->enabled && !this->pinned)
1214		pci_disable_device(dev);
1215}
1216
1217static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1218{
1219	struct pci_devres *dr, *new_dr;
1220
1221	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1222	if (dr)
1223		return dr;
1224
1225	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1226	if (!new_dr)
1227		return NULL;
1228	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1229}
1230
1231static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1232{
1233	if (pci_is_managed(pdev))
1234		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1235	return NULL;
1236}
1237
1238/**
1239 * pcim_enable_device - Managed pci_enable_device()
1240 * @pdev: PCI device to be initialized
1241 *
1242 * Managed pci_enable_device().
1243 */
1244int pcim_enable_device(struct pci_dev *pdev)
1245{
1246	struct pci_devres *dr;
1247	int rc;
1248
1249	dr = get_pci_dr(pdev);
1250	if (unlikely(!dr))
1251		return -ENOMEM;
1252	if (dr->enabled)
1253		return 0;
1254
1255	rc = pci_enable_device(pdev);
1256	if (!rc) {
1257		pdev->is_managed = 1;
1258		dr->enabled = 1;
1259	}
1260	return rc;
1261}
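/*
 * Illustrative sketch (editor's addition, not from the original file):
 * a managed probe.  Everything done through the pcim_* interface is
 * undone automatically on driver detach.  foo_* is hypothetical.
 */
#if 0
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	/* no explicit pci_disable_device() needed in error or remove paths */
	return 0;
}
#endif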
1262
1263/**
1264 * pcim_pin_device - Pin managed PCI device
1265 * @pdev: PCI device to pin
1266 *
1267 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1268 * driver detach.  @pdev must have been enabled with
1269 * pcim_enable_device().
1270 */
1271void pcim_pin_device(struct pci_dev *pdev)
1272{
1273	struct pci_devres *dr;
1274
1275	dr = find_pci_dr(pdev);
1276	WARN_ON(!dr || !dr->enabled);
1277	if (dr)
1278		dr->pinned = 1;
1279}
1280
1281/**
1282 * pcibios_disable_device - disable arch specific PCI resources for device dev
1283 * @dev: the PCI device to disable
1284 *
1285 * Disables architecture specific PCI resources for the device. This
1286 * is the default implementation. Architecture implementations can
1287 * override this.
1288 */
1289void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1290
1291static void do_pci_disable_device(struct pci_dev *dev)
1292{
1293	u16 pci_command;
1294
1295	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1296	if (pci_command & PCI_COMMAND_MASTER) {
1297		pci_command &= ~PCI_COMMAND_MASTER;
1298		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1299	}
1300
1301	pcibios_disable_device(dev);
1302}
1303
1304/**
1305 * pci_disable_enabled_device - Disable device without updating enable_cnt
1306 * @dev: PCI device to disable
1307 *
1308 * NOTE: This function is a backend of PCI power management routines and is
1309 * not supposed to be called by drivers.
1310 */
1311void pci_disable_enabled_device(struct pci_dev *dev)
1312{
1313	if (pci_is_enabled(dev))
1314		do_pci_disable_device(dev);
1315}
1316
1317/**
1318 * pci_disable_device - Disable PCI device after use
1319 * @dev: PCI device to be disabled
1320 *
1321 * Signal to the system that the PCI device is not in use by the system
1322 * anymore.  This only involves disabling PCI bus-mastering, if active.
1323 *
1324 * Note we don't actually disable the device until all callers of
1325 * pci_enable_device() have called pci_disable_device().
1326 */
1327void
1328pci_disable_device(struct pci_dev *dev)
1329{
1330	struct pci_devres *dr;
1331
1332	dr = find_pci_dr(dev);
1333	if (dr)
1334		dr->enabled = 0;
1335
1336	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1337		return;
1338
1339	do_pci_disable_device(dev);
1340
1341	dev->is_busmaster = 0;
1342}
1343
1344/**
1345 * pcibios_set_pcie_reset_state - set reset state for device dev
1346 * @dev: the PCIe device to reset
1347 * @state: Reset state to enter into
1348 *
1349 *
1350 * Sets the PCIe reset state for the device. This is the default
1351 * implementation. Architecture implementations can override this.
1352 */
1353int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1354							enum pcie_reset_state state)
1355{
1356	return -EINVAL;
1357}
1358
1359/**
1360 * pci_set_pcie_reset_state - set reset state for device dev
1361 * @dev: the PCIe device to reset
1362 * @state: Reset state to enter into
1363 *
1364 *
1365 * Sets the PCI reset state for the device.
1366 */
1367int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1368{
1369	return pcibios_set_pcie_reset_state(dev, state);
1370}
1371
1372/**
1373 * pci_check_pme_status - Check if given device has generated PME.
1374 * @dev: Device to check.
1375 *
1376 * Check the PME status of the device and if set, clear it and clear PME enable
1377 * (if set).  Return 'true' if PME status and PME enable were both set or
1378 * 'false' otherwise.
1379 */
1380bool pci_check_pme_status(struct pci_dev *dev)
1381{
1382	int pmcsr_pos;
1383	u16 pmcsr;
1384	bool ret = false;
1385
1386	if (!dev->pm_cap)
1387		return false;
1388
1389	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1390	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1391	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1392		return false;
1393
1394	/* Clear PME status. */
1395	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1396	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1397		/* Disable PME to avoid interrupt flood. */
1398		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1399		ret = true;
1400	}
1401
1402	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1403
1404	return ret;
1405}
1406
1407/**
1408 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1409 * @dev: Device to handle.
1410 * @ign: Ignored.
1411 *
1412 * Check if @dev has generated PME and queue a resume request for it in that
1413 * case.
1414 */
1415static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
1416{
1417	if (pci_check_pme_status(dev)) {
1418		pci_wakeup_event(dev);
1419		pm_request_resume(&dev->dev);
1420	}
1421	return 0;
1422}
1423
1424/**
1425 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1426 * @bus: Top bus of the subtree to walk.
1427 */
1428void pci_pme_wakeup_bus(struct pci_bus *bus)
1429{
1430	if (bus)
1431		pci_walk_bus(bus, pci_pme_wakeup, NULL);
1432}
1433
1434/**
1435 * pci_pme_capable - check the capability of PCI device to generate PME#
1436 * @dev: PCI device to handle.
1437 * @state: PCI state from which device will issue PME#.
1438 */
1439bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1440{
1441	if (!dev->pm_cap)
1442		return false;
1443
1444	return !!(dev->pme_support & (1 << state));
1445}
1446
1447static void pci_pme_list_scan(struct work_struct *work)
1448{
1449	struct pci_pme_device *pme_dev;
1450
1451	mutex_lock(&pci_pme_list_mutex);
1452	if (!list_empty(&pci_pme_list)) {
1453		list_for_each_entry(pme_dev, &pci_pme_list, list)
1454			pci_pme_wakeup(pme_dev->dev, NULL);
1455		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
1456	}
1457	mutex_unlock(&pci_pme_list_mutex);
1458}
1459
1460/**
1461 * pci_external_pme - is a device an external PCI PME source?
1462 * @dev: PCI device to check
1463 *
1464 */
1465
1466static bool pci_external_pme(struct pci_dev *dev)
1467{
1468	if (pci_is_pcie(dev) || dev->bus->number == 0)
1469		return false;
1470	return true;
1471}
1472
1473/**
1474 * pci_pme_active - enable or disable PCI device's PME# function
1475 * @dev: PCI device to handle.
1476 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1477 *
1478 * The caller must verify that the device is capable of generating PME# before
1479 * calling this function with @enable equal to 'true'.
1480 */
1481void pci_pme_active(struct pci_dev *dev, bool enable)
1482{
1483	u16 pmcsr;
1484
1485	if (!dev->pm_cap)
1486		return;
1487
1488	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1489	/* Clear PME_Status by writing 1 to it and enable PME# */
1490	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1491	if (!enable)
1492		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1493
1494	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1495
1496	/* PCI (as opposed to PCIe) PME requires that the device have
1497	   its PME# line hooked up correctly. Not all hardware vendors
1498	   do this, so the PME never gets delivered and the device
1499	   remains asleep. The easiest way around this is to
1500	   periodically walk the list of suspended devices and check
1501	   whether any have their PME flag set. The assumption is that
1502	   we'll wake up often enough anyway that this won't be a huge
1503	   hit, and the power savings from the devices will still be a
1504	   win. */
1505
1506	if (pci_external_pme(dev)) {
1507		struct pci_pme_device *pme_dev;
1508		if (enable) {
1509			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1510					  GFP_KERNEL);
1511			if (!pme_dev)
1512				goto out;
1513			pme_dev->dev = dev;
1514			mutex_lock(&pci_pme_list_mutex);
1515			list_add(&pme_dev->list, &pci_pme_list);
1516			if (list_is_singular(&pci_pme_list))
1517				schedule_delayed_work(&pci_pme_work,
1518						      msecs_to_jiffies(PME_TIMEOUT));
1519			mutex_unlock(&pci_pme_list_mutex);
1520		} else {
1521			mutex_lock(&pci_pme_list_mutex);
1522			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1523				if (pme_dev->dev == dev) {
1524					list_del(&pme_dev->list);
1525					kfree(pme_dev);
1526					break;
1527				}
1528			}
1529			mutex_unlock(&pci_pme_list_mutex);
1530		}
1531	}
1532
1533out:
1534	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
1535			enable ? "enabled" : "disabled");
1536}
1537
1538/**
1539 * __pci_enable_wake - enable PCI device as wakeup event source
1540 * @dev: PCI device affected
1541 * @state: PCI state from which device will issue wakeup events
1542 * @runtime: True if the events are to be generated at run time
1543 * @enable: True to enable event generation; false to disable
1544 *
1545 * This enables the device as a wakeup event source, or disables it.
1546 * When such events involve platform-specific hooks, those hooks are
1547 * called automatically by this routine.
1548 *
1549 * Devices with legacy power management (no standard PCI PM capabilities)
1550 * always require such platform hooks.
1551 *
1552 * RETURN VALUE:
1553 * 0 is returned on success
1554 * -EINVAL is returned if device is not supposed to wake up the system
1555 * Error code depending on the platform is returned if both the platform and
1556 * the native mechanism fail to enable the generation of wake-up events
1557 */
1558int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1559		      bool runtime, bool enable)
1560{
1561	int ret = 0;
1562
1563	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1564		return -EINVAL;
1565
1566	/* Don't do the same thing twice in a row for one device. */
1567	if (!!enable == !!dev->wakeup_prepared)
1568		return 0;
1569
1570	/*
1571	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1572	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1573	 * enable.  To disable wake-up we call the platform first, for symmetry.
1574	 */
1575
1576	if (enable) {
1577		int error;
1578
1579		if (pci_pme_capable(dev, state))
1580			pci_pme_active(dev, true);
1581		else
1582			ret = 1;
1583		error = runtime ? platform_pci_run_wake(dev, true) :
1584					platform_pci_sleep_wake(dev, true);
1585		if (ret)
1586			ret = error;
1587		if (!ret)
1588			dev->wakeup_prepared = true;
1589	} else {
1590		if (runtime)
1591			platform_pci_run_wake(dev, false);
1592		else
1593			platform_pci_sleep_wake(dev, false);
1594		pci_pme_active(dev, false);
1595		dev->wakeup_prepared = false;
1596	}
1597
1598	return ret;
1599}
1600EXPORT_SYMBOL(__pci_enable_wake);
1601
1602/**
1603 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1604 * @dev: PCI device to prepare
1605 * @enable: True to enable wake-up event generation; false to disable
1606 *
1607 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1608 * and this function allows them to set that up cleanly - pci_enable_wake()
1609 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1610 * ordering constraints.
1611 *
1612 * This function only returns error code if the device is not capable of
1613 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1614 * enable wake-up power for it.
1615 */
1616int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1617{
1618	return pci_pme_capable(dev, PCI_D3cold) ?
1619			pci_enable_wake(dev, PCI_D3cold, enable) :
1620			pci_enable_wake(dev, PCI_D3hot, enable);
1621}
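/*
 * Illustrative sketch (editor's addition, not from the original file):
 * arming wake-up (e.g. Wake-on-LAN) before suspending.  foo_* is
 * hypothetical; error handling is elided.
 */
#if 0
static int foo_suspend_wol(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_wake_from_d3(pdev, true);	/* wake from D3hot/D3cold if possible */
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif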
1622
1623/**
1624 * pci_target_state - find an appropriate low power state for a given PCI dev
1625 * @dev: PCI device
1626 *
1627 * Use underlying platform code to find a supported low power state for @dev.
1628 * If the platform can't manage @dev, return the deepest state from which it
1629 * can generate wake events, based on any available PME info.
1630 */
1631pci_power_t pci_target_state(struct pci_dev *dev)
1632{
1633	pci_power_t target_state = PCI_D3hot;
1634
1635	if (platform_pci_power_manageable(dev)) {
1636		/*
1637		 * Call the platform to choose the target state of the device
1638		 * and enable wake-up from this state if supported.
1639		 */
1640		pci_power_t state = platform_pci_choose_state(dev);
1641
1642		switch (state) {
1643		case PCI_POWER_ERROR:
1644		case PCI_UNKNOWN:
1645			break;
1646		case PCI_D1:
1647		case PCI_D2:
1648			if (pci_no_d1d2(dev))
1649				break;
1650		default:
1651			target_state = state;
1652		}
1653	} else if (!dev->pm_cap) {
1654		target_state = PCI_D0;
1655	} else if (device_may_wakeup(&dev->dev)) {
1656		/*
1657		 * Find the deepest state from which the device can generate
1658		 * wake-up events, make it the target state and enable device
1659		 * to generate PME#.
1660		 */
1661		if (dev->pme_support) {
1662			while (target_state
1663			      && !(dev->pme_support & (1 << target_state)))
1664				target_state--;
1665		}
1666	}
1667
1668	return target_state;
1669}
1670
1671/**
1672 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1673 * @dev: Device to handle.
1674 *
1675 * Choose the power state appropriate for the device depending on whether
1676 * it can wake up the system and/or is power manageable by the platform
1677 * (PCI_D3hot is the default) and put the device into that state.
1678 */
1679int pci_prepare_to_sleep(struct pci_dev *dev)
1680{
1681	pci_power_t target_state = pci_target_state(dev);
1682	int error;
1683
1684	if (target_state == PCI_POWER_ERROR)
1685		return -EIO;
1686
1687	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1688
1689	error = pci_set_power_state(dev, target_state);
1690
1691	if (error)
1692		pci_enable_wake(dev, target_state, false);
1693
1694	return error;
1695}
1696
1697/**
1698 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1699 * @dev: Device to handle.
1700 *
1701 * Disable device's system wake-up capability and put it into D0.
1702 */
1703int pci_back_from_sleep(struct pci_dev *dev)
1704{
1705	pci_enable_wake(dev, PCI_D0, false);
1706	return pci_set_power_state(dev, PCI_D0);
1707}
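/*
 * Illustrative sketch (editor's addition, not from the original file):
 * a suspend/resume pair built on pci_prepare_to_sleep() and
 * pci_back_from_sleep(), which pick the target state and handle wake-up
 * arming internally.  foo_* names are hypothetical.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* choose D-state, arm wake-up */
}

static int foo_resume(struct pci_dev *pdev)
{
	int err = pci_back_from_sleep(pdev);	/* disable wake-up, go to D0 */

	pci_restore_state(pdev);
	return err;
}
#endif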
1708
1709/**
1710 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1711 * @dev: PCI device being suspended.
1712 *
1713 * Prepare @dev to generate wake-up events at run time and put it into a low
1714 * power state.
1715 */
1716int pci_finish_runtime_suspend(struct pci_dev *dev)
1717{
1718	pci_power_t target_state = pci_target_state(dev);
1719	int error;
1720
1721	if (target_state == PCI_POWER_ERROR)
1722		return -EIO;
1723
1724	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1725
1726	error = pci_set_power_state(dev, target_state);
1727
1728	if (error)
1729		__pci_enable_wake(dev, target_state, true, false);
1730
1731	return error;
1732}
1733
1734/**
1735 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1736 * @dev: Device to check.
1737 *
1738 * Return true if the device itself is capable of generating wake-up events
1739 * (through the platform or using the native PCIe PME) or if the device supports
1740 * PME and one of its upstream bridges can generate wake-up events.
1741 */
1742bool pci_dev_run_wake(struct pci_dev *dev)
1743{
1744	struct pci_bus *bus = dev->bus;
1745
1746	if (device_run_wake(&dev->dev))
1747		return true;
1748
1749	if (!dev->pme_support)
1750		return false;
1751
1752	while (bus->parent) {
1753		struct pci_dev *bridge = bus->self;
1754
1755		if (device_run_wake(&bridge->dev))
1756			return true;
1757
1758		bus = bus->parent;
1759	}
1760
1761	/* We have reached the root bus. */
1762	if (bus->bridge)
1763		return device_run_wake(bus->bridge);
1764
1765	return false;
1766}
1767EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1768
1769/**
1770 * pci_pm_init - Initialize PM functions of given PCI device
1771 * @dev: PCI device to handle.
1772 */
1773void pci_pm_init(struct pci_dev *dev)
1774{
1775	int pm;
1776	u16 pmc;
1777
1778	pm_runtime_forbid(&dev->dev);
1779	device_enable_async_suspend(&dev->dev);
1780	dev->wakeup_prepared = false;
1781
1782	dev->pm_cap = 0;
1783
1784	/* find PCI PM capability in list */
1785	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1786	if (!pm)
1787		return;
1788	/* Check device's ability to generate PME# */
1789	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1790
1791	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1792		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1793			pmc & PCI_PM_CAP_VER_MASK);
1794		return;
1795	}
1796
1797	dev->pm_cap = pm;
1798	dev->d3_delay = PCI_PM_D3_WAIT;
1799
1800	dev->d1_support = false;
1801	dev->d2_support = false;
1802	if (!pci_no_d1d2(dev)) {
1803		if (pmc & PCI_PM_CAP_D1)
1804			dev->d1_support = true;
1805		if (pmc & PCI_PM_CAP_D2)
1806			dev->d2_support = true;
1807
1808		if (dev->d1_support || dev->d2_support)
1809			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1810				   dev->d1_support ? " D1" : "",
1811				   dev->d2_support ? " D2" : "");
1812	}
1813
1814	pmc &= PCI_PM_CAP_PME_MASK;
1815	if (pmc) {
1816		dev_printk(KERN_DEBUG, &dev->dev,
1817			 "PME# supported from%s%s%s%s%s\n",
1818			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1819			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1820			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1821			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1822			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1823		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1824		/*
1825		 * Make device's PM flags reflect the wake-up capability, but
1826		 * let the user space enable it to wake up the system as needed.
1827		 */
1828		device_set_wakeup_capable(&dev->dev, true);
1829		/* Disable the PME# generation functionality */
1830		pci_pme_active(dev, false);
1831	} else {
1832		dev->pme_support = 0;
1833	}
1834}
1835
1836/**
1837 * platform_pci_wakeup_init - init platform wakeup if present
1838 * @dev: PCI device
1839 *
1840 * Some devices don't have PCI PM caps but can still generate wakeup
1841 * events through platform methods (like ACPI events).  If @dev supports
1842 * platform wakeup events, set the device flag to indicate as much.  This
1843 * may be redundant if the device also supports PCI PM caps, but double
1844 * initialization should be safe in that case.
1845 */
1846void platform_pci_wakeup_init(struct pci_dev *dev)
1847{
1848	if (!platform_pci_can_wakeup(dev))
1849		return;
1850
1851	device_set_wakeup_capable(&dev->dev, true);
1852	platform_pci_sleep_wake(dev, false);
1853}
1854
1855/**
1856 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1857 * @dev: the PCI device
1858 * @cap: the capability to allocate the buffer for
1859 * @size: requested size of the buffer
1860 */
1861static int pci_add_cap_save_buffer(
1862	struct pci_dev *dev, char cap, unsigned int size)
1863{
1864	int pos;
1865	struct pci_cap_saved_state *save_state;
1866
1867	pos = pci_find_capability(dev, cap);
1868	if (pos <= 0)
1869		return 0;
1870
1871	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1872	if (!save_state)
1873		return -ENOMEM;
1874
1875	save_state->cap.cap_nr = cap;
1876	save_state->cap.size = size;
1877	pci_add_saved_cap(dev, save_state);
1878
1879	return 0;
1880}
1881
1882/**
1883 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1884 * @dev: the PCI device
1885 */
1886void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1887{
1888	int error;
1889
1890	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1891					PCI_EXP_SAVE_REGS * sizeof(u16));
1892	if (error)
1893		dev_err(&dev->dev,
1894			"unable to preallocate PCI Express save buffer\n");
1895
1896	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1897	if (error)
1898		dev_err(&dev->dev,
1899			"unable to preallocate PCI-X save buffer\n");
1900}
1901
1902/**
1903 * pci_enable_ari - enable ARI forwarding if hardware supports it
1904 * @dev: the PCI device
1905 */
1906void pci_enable_ari(struct pci_dev *dev)
1907{
1908	int pos;
1909	u32 cap;
1910	u16 flags, ctrl;
1911	struct pci_dev *bridge;
1912
1913	if (!pci_is_pcie(dev) || dev->devfn)
1914		return;
1915
1916	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1917	if (!pos)
1918		return;
1919
1920	bridge = dev->bus->self;
1921	if (!bridge || !pci_is_pcie(bridge))
1922		return;
1923
1924	pos = pci_pcie_cap(bridge);
1925	if (!pos)
1926		return;
1927
1928	/* ARI is a PCIe v2 feature */
1929	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
1930	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
1931		return;
1932
1933	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1934	if (!(cap & PCI_EXP_DEVCAP2_ARI))
1935		return;
1936
1937	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1938	ctrl |= PCI_EXP_DEVCTL2_ARI;
1939	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1940
1941	bridge->ari_enabled = 1;
1942}
1943
1944/**
1945 * pci_enable_ido - enable ID-based ordering on a device
1946 * @dev: the PCI device
1947 * @type: which types of IDO to enable
1948 *
1949 * Enable ID-based ordering on @dev.  @type can contain the bits
1950 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
1951 * which types of transactions are allowed to be re-ordered.
1952 */
1953void pci_enable_ido(struct pci_dev *dev, unsigned long type)
1954{
1955	int pos;
1956	u16 ctrl;
1957
1958	pos = pci_pcie_cap(dev);
1959	if (!pos)
1960		return;
1961
1962	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1963	if (type & PCI_EXP_IDO_REQUEST)
1964		ctrl |= PCI_EXP_IDO_REQ_EN;
1965	if (type & PCI_EXP_IDO_COMPLETION)
1966		ctrl |= PCI_EXP_IDO_CMP_EN;
1967	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1968}
1969EXPORT_SYMBOL(pci_enable_ido);
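
/*
 * Illustrative sketch (hypothetical): a driver whose DMA traffic has no
 * ordering dependencies between requesters could opt in to both IDO
 * types from its probe path.
 */
static void foo_opt_in_ido(struct pci_dev *pdev)
{
	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
}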
1970
1971/**
1972 * pci_disable_ido - disable ID-based ordering on a device
1973 * @dev: the PCI device
1974 * @type: which types of IDO to disable
1975 */
1976void pci_disable_ido(struct pci_dev *dev, unsigned long type)
1977{
1978	int pos;
1979	u16 ctrl;
1980
1981	if (!pci_is_pcie(dev))
1982		return;
1983
1984	pos = pci_pcie_cap(dev);
1985	if (!pos)
1986		return;
1987
1988	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1989	if (type & PCI_EXP_IDO_REQUEST)
1990		ctrl &= ~PCI_EXP_IDO_REQ_EN;
1991	if (type & PCI_EXP_IDO_COMPLETION)
1992		ctrl &= ~PCI_EXP_IDO_CMP_EN;
1993	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1994}
1995EXPORT_SYMBOL(pci_disable_ido);
1996
1997/**
1998 * pci_enable_obff - enable optimized buffer flush/fill
1999 * @dev: PCI device
2000 * @type: type of signaling to use
2001 *
2002 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2003 * signaling if possible, falling back to message signaling only if
2004 * WAKE# isn't supported.  @type indicates whether the PCIe link should
2005 * be brought out of L0s or L1 to send the message.  It should be either
2006 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2007 *
2008 * If your device can benefit from receiving all messages, even at the
2009 * power cost of bringing the link back up from a low power state, use
2010 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2011 * preferred type).
2012 *
2013 * RETURNS:
2014 * Zero on success, appropriate error number on failure.
2015 */
2016int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2017{
2018	int pos;
2019	u32 cap;
2020	u16 ctrl;
2021	int ret;
2022
2023	if (!pci_is_pcie(dev))
2024		return -ENOTSUPP;
2025
2026	pos = pci_pcie_cap(dev);
2027	if (!pos)
2028		return -ENOTSUPP;
2029
2030	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2031	if (!(cap & PCI_EXP_OBFF_MASK))
2032		return -ENOTSUPP; /* no OBFF support at all */
2033
2034	/* Make sure the topology supports OBFF as well */
2035	if (dev->bus && dev->bus->self) {
2036		ret = pci_enable_obff(dev->bus->self, type);
2037		if (ret)
2038			return ret;
2039	}
2040
2041	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2042	if (cap & PCI_EXP_OBFF_WAKE)
2043		ctrl |= PCI_EXP_OBFF_WAKE_EN;
2044	else {
2045		switch (type) {
2046		case PCI_EXP_OBFF_SIGNAL_L0:
2047			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2048				ctrl |= PCI_EXP_OBFF_MSGA_EN;
2049			break;
2050		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2051			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2052			ctrl |= PCI_EXP_OBFF_MSGB_EN;
2053			break;
2054		default:
2055			WARN(1, "bad OBFF signal type\n");
2056			return -ENOTSUPP;
2057		}
2058	}
2059	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2060
2061	return 0;
2062}
2063EXPORT_SYMBOL(pci_enable_obff);
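
/*
 * Illustrative sketch (hypothetical): OBFF is an optimization, so a
 * caller would normally treat failure as non-fatal.
 */
static void foo_try_obff(struct pci_dev *pdev)
{
	int err = pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0);

	if (err)
		dev_dbg(&pdev->dev, "OBFF not enabled: %d\n", err);
}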
2064
2065/**
2066 * pci_disable_obff - disable optimized buffer flush/fill
2067 * @dev: PCI device
2068 *
2069 * Disable OBFF on @dev.
2070 */
2071void pci_disable_obff(struct pci_dev *dev)
2072{
2073	int pos;
2074	u16 ctrl;
2075
2076	if (!pci_is_pcie(dev))
2077		return;
2078
2079	pos = pci_pcie_cap(dev);
2080	if (!pos)
2081		return;
2082
2083	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2084	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2085	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2086}
2087EXPORT_SYMBOL(pci_disable_obff);
2088
2089/**
2090 * pci_ltr_supported - check whether a device supports LTR
2091 * @dev: PCI device
2092 *
2093 * RETURNS:
2094 * True if @dev supports latency tolerance reporting, false otherwise.
2095 */
2096bool pci_ltr_supported(struct pci_dev *dev)
2097{
2098	int pos;
2099	u32 cap;
2100
2101	if (!pci_is_pcie(dev))
2102		return false;
2103
2104	pos = pci_pcie_cap(dev);
2105	if (!pos)
2106		return false;
2107
2108	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2109
2110	return cap & PCI_EXP_DEVCAP2_LTR;
2111}
2112EXPORT_SYMBOL(pci_ltr_supported);
2113
2114/**
2115 * pci_enable_ltr - enable latency tolerance reporting
2116 * @dev: PCI device
2117 *
2118 * Enable LTR on @dev if possible, which means enabling it first on
2119 * upstream ports.
2120 *
2121 * RETURNS:
2122 * Zero on success, errno on failure.
2123 */
2124int pci_enable_ltr(struct pci_dev *dev)
2125{
2126	int pos;
2127	u16 ctrl;
2128	int ret;
2129
2130	if (!pci_ltr_supported(dev))
2131		return -ENOTSUPP;
2132
2133	pos = pci_pcie_cap(dev);
2134	if (!pos)
2135		return -ENOTSUPP;
2136
2137	/* Only primary function can enable/disable LTR */
2138	if (PCI_FUNC(dev->devfn) != 0)
2139		return -EINVAL;
2140
2141	/* Enable upstream ports first */
2142	if (dev->bus && dev->bus->self) {
2143		ret = pci_enable_ltr(dev->bus->self);
2144		if (ret)
2145			return ret;
2146	}
2147
2148	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2149	ctrl |= PCI_EXP_LTR_EN;
2150	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2151
2152	return 0;
2153}
2154EXPORT_SYMBOL(pci_enable_ltr);
2155
2156/**
2157 * pci_disable_ltr - disable latency tolerance reporting
2158 * @dev: PCI device
2159 */
2160void pci_disable_ltr(struct pci_dev *dev)
2161{
2162	int pos;
2163	u16 ctrl;
2164
2165	if (!pci_ltr_supported(dev))
2166		return;
2167
2168	pos = pci_pcie_cap(dev);
2169	if (!pos)
2170		return;
2171
2172	/* Only primary function can enable/disable LTR */
2173	if (PCI_FUNC(dev->devfn) != 0)
2174		return;
2175
2176	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2177	ctrl &= ~PCI_EXP_LTR_EN;
2178	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2179}
2180EXPORT_SYMBOL(pci_disable_ltr);
2181
2182static int __pci_ltr_scale(int *val)
2183{
2184	int scale = 0;
2185
2186	while (*val > 1023) {
2187		*val = (*val + 31) / 32;
2188		scale++;
2189	}
2190	return scale;
2191}
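
/*
 * Worked example for the scaling loop above: a latency of 70000 ns does
 * not fit in the 10-bit value field, so it is divided (rounding up) by
 * 32 twice: 70000 -> 2188 -> 69, giving scale 2.  The encoded pair
 * (value 69, scale 2) stands for 69 * 32^2 = 70656 ns.
 */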
2192
2193/**
2194 * pci_set_ltr - set LTR latency values
2195 * @dev: PCI device
2196 * @snoop_lat_ns: snoop latency in nanoseconds
2197 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2198 *
2199 * Figure out the scale and set the LTR values accordingly.
2200 */
2201int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2202{
2203	int pos, ret, snoop_scale, nosnoop_scale;
2204	u16 val;
2205
2206	if (!pci_ltr_supported(dev))
2207		return -ENOTSUPP;
2208
2209	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2210	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2211
2212	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2213	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2214		return -EINVAL;
2215
2216	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2217	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2218		return -EINVAL;
2219
2220	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2221	if (!pos)
2222		return -ENOTSUPP;
2223
2224	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2225	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2226	if (ret)
2227		return -EIO;
2228
2229	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2230	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2231	if (ret)
2232		return -EIO;
2233
2234	return 0;
2235}
2236EXPORT_SYMBOL(pci_set_ltr);
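
/*
 * Illustrative sketch (hypothetical): enabling LTR and then programming
 * example latencies from a probe path; the 4 us figures are arbitrary.
 */
static void foo_setup_ltr(struct pci_dev *pdev)
{
	if (pci_enable_ltr(pdev) == 0)
		pci_set_ltr(pdev, 4000, 4000);	/* 4 us snoop and no-snoop */
}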
2237
2238static int pci_acs_enable;
2239
2240/**
2241 * pci_request_acs - ask for ACS to be enabled if supported
2242 */
2243void pci_request_acs(void)
2244{
2245	pci_acs_enable = 1;
2246}
2247
2248/**
2249 * pci_enable_acs - enable ACS if hardware supports it
2250 * @dev: the PCI device
2251 */
2252void pci_enable_acs(struct pci_dev *dev)
2253{
2254	int pos;
2255	u16 cap;
2256	u16 ctrl;
2257
2258	if (!pci_acs_enable)
2259		return;
2260
2261	if (!pci_is_pcie(dev))
2262		return;
2263
2264	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2265	if (!pos)
2266		return;
2267
2268	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2269	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2270
2271	/* Source Validation */
2272	ctrl |= (cap & PCI_ACS_SV);
2273
2274	/* P2P Request Redirect */
2275	ctrl |= (cap & PCI_ACS_RR);
2276
2277	/* P2P Completion Redirect */
2278	ctrl |= (cap & PCI_ACS_CR);
2279
2280	/* Upstream Forwarding */
2281	ctrl |= (cap & PCI_ACS_UF);
2282
2283	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2284}
2285
2286/**
2287 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2288 * @dev: the PCI device
2289 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2290 *
2291 * Perform INTx swizzling for a device behind one level of bridge.  This is
2292 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2293 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2294 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2295 * the PCI Express Base Specification, Revision 2.1)
2296 */
2297u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2298{
2299	int slot;
2300
2301	if (pci_ari_enabled(dev->bus))
2302		slot = 0;
2303	else
2304		slot = PCI_SLOT(dev->devfn);
2305
2306	return (((pin - 1) + slot) % 4) + 1;
2307}
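
/*
 * Worked example of the swizzle above: a device in slot 3 asserting
 * INTB (pin 2) appears on the bridge's primary side as
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA.
 */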
2308
2309int
2310pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2311{
2312	u8 pin;
2313
2314	pin = dev->pin;
2315	if (!pin)
2316		return -1;
2317
2318	while (!pci_is_root_bus(dev->bus)) {
2319		pin = pci_swizzle_interrupt_pin(dev, pin);
2320		dev = dev->bus->self;
2321	}
2322	*bridge = dev;
2323	return pin;
2324}
2325
2326/**
2327 * pci_common_swizzle - swizzle INTx all the way to root bridge
2328 * @dev: the PCI device
2329 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2330 *
2331 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2332 * bridges all the way up to a PCI root bus.
2333 */
2334u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2335{
2336	u8 pin = *pinp;
2337
2338	while (!pci_is_root_bus(dev->bus)) {
2339		pin = pci_swizzle_interrupt_pin(dev, pin);
2340		dev = dev->bus->self;
2341	}
2342	*pinp = pin;
2343	return PCI_SLOT(dev->devfn);
2344}
2345
2346/**
2347 *	pci_release_region - Release a PCI BAR
2348 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2349 *	@bar: BAR to release
2350 *
2351 *	Releases the PCI I/O and memory resources previously reserved by a
2352 *	successful call to pci_request_region.  Call this function only
2353 *	after all use of the PCI regions has ceased.
2354 */
2355void pci_release_region(struct pci_dev *pdev, int bar)
2356{
2357	struct pci_devres *dr;
2358
2359	if (pci_resource_len(pdev, bar) == 0)
2360		return;
2361	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2362		release_region(pci_resource_start(pdev, bar),
2363				pci_resource_len(pdev, bar));
2364	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2365		release_mem_region(pci_resource_start(pdev, bar),
2366				pci_resource_len(pdev, bar));
2367
2368	dr = find_pci_dr(pdev);
2369	if (dr)
2370		dr->region_mask &= ~(1 << bar);
2371}
2372
2373/**
2374 *	__pci_request_region - Reserve PCI I/O and memory resource
2375 *	@pdev: PCI device whose resources are to be reserved
2376 *	@bar: BAR to be reserved
2377 *	@res_name: Name to be associated with resource.
2378 *	@exclusive: whether the region access is exclusive or not
2379 *
2380 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2381 *	being reserved by owner @res_name.  Do not access any
2382 *	address inside the PCI regions unless this call returns
2383 *	successfully.
2384 *
2385 *	If @exclusive is set, then the region is marked so that userspace
2386 *	is explicitly not allowed to map the resource via /dev/mem or
2387 * 	sysfs MMIO access.
2388 *
2389 *	Returns 0 on success, or %EBUSY on error.  A warning
2390 *	message is also printed on failure.
2391 */
2392static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2393									int exclusive)
2394{
2395	struct pci_devres *dr;
2396
2397	if (pci_resource_len(pdev, bar) == 0)
2398		return 0;
2399
2400	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2401		if (!request_region(pci_resource_start(pdev, bar),
2402			    pci_resource_len(pdev, bar), res_name))
2403			goto err_out;
2404	}
2405	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2406		if (!__request_mem_region(pci_resource_start(pdev, bar),
2407					pci_resource_len(pdev, bar), res_name,
2408					exclusive))
2409			goto err_out;
2410	}
2411
2412	dr = find_pci_dr(pdev);
2413	if (dr)
2414		dr->region_mask |= 1 << bar;
2415
2416	return 0;
2417
2418err_out:
2419	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2420		 &pdev->resource[bar]);
2421	return -EBUSY;
2422}
2423
2424/**
2425 *	pci_request_region - Reserve PCI I/O and memory resource
2426 *	@pdev: PCI device whose resources are to be reserved
2427 *	@bar: BAR to be reserved
2428 *	@res_name: Name to be associated with resource
2429 *
2430 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2431 *	being reserved by owner @res_name.  Do not access any
2432 *	address inside the PCI regions unless this call returns
2433 *	successfully.
2434 *
2435 *	Returns 0 on success, or %EBUSY on error.  A warning
2436 *	message is also printed on failure.
2437 */
2438int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2439{
2440	return __pci_request_region(pdev, bar, res_name, 0);
2441}
2442
2443/**
2444 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2445 *	@pdev: PCI device whose resources are to be reserved
2446 *	@bar: BAR to be reserved
2447 *	@res_name: Name to be associated with resource.
2448 *
2449 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2450 *	being reserved by owner @res_name.  Do not access any
2451 *	address inside the PCI regions unless this call returns
2452 *	successfully.
2453 *
2454 *	Returns 0 on success, or %EBUSY on error.  A warning
2455 *	message is also printed on failure.
2456 *
2457 *	The key difference that _exclusive makes is that userspace is
2458 *	explicitly not allowed to map the resource via /dev/mem or
2459 * 	sysfs.
2460 */
2461int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2462{
2463	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2464}
2465/**
2466 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2467 * @pdev: PCI device whose resources were previously reserved
2468 * @bars: Bitmask of BARs to be released
2469 *
2470 * Release selected PCI I/O and memory resources previously reserved.
2471 * Call this function only after all use of the PCI regions has ceased.
2472 */
2473void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2474{
2475	int i;
2476
2477	for (i = 0; i < 6; i++)
2478		if (bars & (1 << i))
2479			pci_release_region(pdev, i);
2480}
2481
2482int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2483				 const char *res_name, int excl)
2484{
2485	int i;
2486
2487	for (i = 0; i < 6; i++)
2488		if (bars & (1 << i))
2489			if (__pci_request_region(pdev, i, res_name, excl))
2490				goto err_out;
2491	return 0;
2492
2493err_out:
2494	while (--i >= 0)
2495		if (bars & (1 << i))
2496			pci_release_region(pdev, i);
2497
2498	return -EBUSY;
2499}
2500
2501
2502/**
2503 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2504 * @pdev: PCI device whose resources are to be reserved
2505 * @bars: Bitmask of BARs to be requested
2506 * @res_name: Name to be associated with resource
2507 */
2508int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2509				 const char *res_name)
2510{
2511	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2512}
2513
2514int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2515				 int bars, const char *res_name)
2516{
2517	return __pci_request_selected_regions(pdev, bars, res_name,
2518			IORESOURCE_EXCLUSIVE);
2519}
2520
2521/**
2522 *	pci_release_regions - Release reserved PCI I/O and memory resources
2523 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2524 *
2525 *	Releases all PCI I/O and memory resources previously reserved by a
2526 *	successful call to pci_request_regions.  Call this function only
2527 *	after all use of the PCI regions has ceased.
2528 */
2529
2530void pci_release_regions(struct pci_dev *pdev)
2531{
2532	pci_release_selected_regions(pdev, (1 << 6) - 1);
2533}
2534
2535/**
2536 *	pci_request_regions - Reserve PCI I/O and memory resources
2537 *	@pdev: PCI device whose resources are to be reserved
2538 *	@res_name: Name to be associated with resource.
2539 *
2540 *	Mark all PCI regions associated with PCI device @pdev as
2541 *	being reserved by owner @res_name.  Do not access any
2542 *	address inside the PCI regions unless this call returns
2543 *	successfully.
2544 *
2545 *	Returns 0 on success, or %EBUSY on error.  A warning
2546 *	message is also printed on failure.
2547 */
2548int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2549{
2550	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2551}
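
/*
 * Illustrative sketch (hypothetical): the canonical probe-time pairing
 * of the enable/request calls above; "foo" is a placeholder owner name.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}

	/* ... map BARs, set up DMA, register the device ... */
	return 0;
}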
2552
2553/**
2554 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2555 *	@pdev: PCI device whose resources are to be reserved
2556 *	@res_name: Name to be associated with resource.
2557 *
2558 *	Mark all PCI regions associated with PCI device @pdev as
2559 *	being reserved by owner @res_name.  Do not access any
2560 *	address inside the PCI regions unless this call returns
2561 *	successfully.
2562 *
2563 *	pci_request_regions_exclusive() will mark the region so that
2564 * 	/dev/mem and the sysfs MMIO access will not be allowed.
2565 *
2566 *	Returns 0 on success, or %EBUSY on error.  A warning
2567 *	message is also printed on failure.
2568 */
2569int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2570{
2571	return pci_request_selected_regions_exclusive(pdev,
2572					((1 << 6) - 1), res_name);
2573}
2574
2575static void __pci_set_master(struct pci_dev *dev, bool enable)
2576{
2577	u16 old_cmd, cmd;
2578
2579	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2580	if (enable)
2581		cmd = old_cmd | PCI_COMMAND_MASTER;
2582	else
2583		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2584	if (cmd != old_cmd) {
2585		dev_dbg(&dev->dev, "%s bus mastering\n",
2586			enable ? "enabling" : "disabling");
2587		pci_write_config_word(dev, PCI_COMMAND, cmd);
2588	}
2589	dev->is_busmaster = enable;
2590}
2591
2592/**
2593 * pci_set_master - enables bus-mastering for device dev
2594 * @dev: the PCI device to enable
2595 *
2596 * Enables bus-mastering on the device and calls pcibios_set_master()
2597 * to do the needed arch specific settings.
2598 */
2599void pci_set_master(struct pci_dev *dev)
2600{
2601	__pci_set_master(dev, true);
2602	pcibios_set_master(dev);
2603}
2604
2605/**
2606 * pci_clear_master - disables bus-mastering for device dev
2607 * @dev: the PCI device to disable
2608 */
2609void pci_clear_master(struct pci_dev *dev)
2610{
2611	__pci_set_master(dev, false);
2612}
2613
2614/**
2615 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2616 * @dev: the PCI device for which MWI is to be enabled
2617 *
2618 * Helper function for pci_set_mwi.
2619 * Originally copied from drivers/net/acenic.c.
2620 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2621 *
2622 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2623 */
2624int pci_set_cacheline_size(struct pci_dev *dev)
2625{
2626	u8 cacheline_size;
2627
2628	if (!pci_cache_line_size)
2629		return -EINVAL;
2630
2631	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2632	   equal to or multiple of the right value. */
2633	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2634	if (cacheline_size >= pci_cache_line_size &&
2635	    (cacheline_size % pci_cache_line_size) == 0)
2636		return 0;
2637
2638	/* Write the correct value. */
2639	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2640	/* Read it back. */
2641	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2642	if (cacheline_size == pci_cache_line_size)
2643		return 0;
2644
2645	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2646		   "supported\n", pci_cache_line_size << 2);
2647
2648	return -EINVAL;
2649}
2650EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2651
2652#ifdef PCI_DISABLE_MWI
2653int pci_set_mwi(struct pci_dev *dev)
2654{
2655	return 0;
2656}
2657
2658int pci_try_set_mwi(struct pci_dev *dev)
2659{
2660	return 0;
2661}
2662
2663void pci_clear_mwi(struct pci_dev *dev)
2664{
2665}
2666
2667#else
2668
2669/**
2670 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2671 * @dev: the PCI device for which MWI is enabled
2672 *
2673 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2674 *
2675 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2676 */
2677int
2678pci_set_mwi(struct pci_dev *dev)
2679{
2680	int rc;
2681	u16 cmd;
2682
2683	rc = pci_set_cacheline_size(dev);
2684	if (rc)
2685		return rc;
2686
2687	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2688	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2689		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2690		cmd |= PCI_COMMAND_INVALIDATE;
2691		pci_write_config_word(dev, PCI_COMMAND, cmd);
2692	}
2693
2694	return 0;
2695}
2696
2697/**
2698 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2699 * @dev: the PCI device for which MWI is enabled
2700 *
2701 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2702 * Callers are not required to check the return value.
2703 *
2704 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2705 */
2706int pci_try_set_mwi(struct pci_dev *dev)
2707{
2708	int rc = pci_set_mwi(dev);
2709	return rc;
2710}
2711
2712/**
2713 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2714 * @dev: the PCI device to disable
2715 *
2716 * Disables PCI Memory-Write-Invalidate transaction on the device
2717 */
2718void
2719pci_clear_mwi(struct pci_dev *dev)
2720{
2721	u16 cmd;
2722
2723	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2724	if (cmd & PCI_COMMAND_INVALIDATE) {
2725		cmd &= ~PCI_COMMAND_INVALIDATE;
2726		pci_write_config_word(dev, PCI_COMMAND, cmd);
2727	}
2728}
2729#endif /* ! PCI_DISABLE_MWI */
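
/*
 * Illustrative sketch (hypothetical): since MWI is purely an
 * optimization, callers normally use the best-effort variant and ignore
 * the result.
 */
static void foo_enable_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* harmless no-op where MWI is unsupported */
}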
2730
2731/**
2732 * pci_intx - enables/disables PCI INTx for device dev
2733 * @pdev: the PCI device to operate on
2734 * @enable: boolean: whether to enable or disable PCI INTx
2735 *
2736 * Enables/disables PCI INTx for device dev
2737 */
2738void
2739pci_intx(struct pci_dev *pdev, int enable)
2740{
2741	u16 pci_command, new;
2742
2743	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2744
2745	if (enable) {
2746		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2747	} else {
2748		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2749	}
2750
2751	if (new != pci_command) {
2752		struct pci_devres *dr;
2753
2754		pci_write_config_word(pdev, PCI_COMMAND, new);
2755
2756		dr = find_pci_dr(pdev);
2757		if (dr && !dr->restore_intx) {
2758			dr->restore_intx = 1;
2759			dr->orig_intx = !enable;
2760		}
2761	}
2762}
2763
2764/**
2765 * pci_msi_off - disables any MSI or MSI-X capabilities
2766 * @dev: the PCI device to operate on
2767 *
2768 * If you want to use MSI, see pci_enable_msi() and friends.
2769 * This is a lower-level primitive that allows us to disable
2770 * MSI operation at the device level.
2771 */
2772void pci_msi_off(struct pci_dev *dev)
2773{
2774	int pos;
2775	u16 control;
2776
2777	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2778	if (pos) {
2779		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2780		control &= ~PCI_MSI_FLAGS_ENABLE;
2781		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2782	}
2783	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2784	if (pos) {
2785		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2786		control &= ~PCI_MSIX_FLAGS_ENABLE;
2787		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2788	}
2789}
2790EXPORT_SYMBOL_GPL(pci_msi_off);
2791
2792int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2793{
2794	return dma_set_max_seg_size(&dev->dev, size);
2795}
2796EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2797
2798int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2799{
2800	return dma_set_seg_boundary(&dev->dev, mask);
2801}
2802EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2803
2804static int pcie_flr(struct pci_dev *dev, int probe)
2805{
2806	int i;
2807	int pos;
2808	u32 cap;
2809	u16 status, control;
2810
2811	pos = pci_pcie_cap(dev);
2812	if (!pos)
2813		return -ENOTTY;
2814
2815	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2816	if (!(cap & PCI_EXP_DEVCAP_FLR))
2817		return -ENOTTY;
2818
2819	if (probe)
2820		return 0;
2821
2822	/* Wait for the Transaction Pending bit to clear */
2823	for (i = 0; i < 4; i++) {
2824		if (i)
2825			msleep((1 << (i - 1)) * 100);
2826
2827		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2828		if (!(status & PCI_EXP_DEVSTA_TRPND))
2829			goto clear;
2830	}
2831
2832	dev_err(&dev->dev, "transaction is not cleared; "
2833			"proceeding with reset anyway\n");
2834
2835clear:
2836	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2837	control |= PCI_EXP_DEVCTL_BCR_FLR;
2838	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2839
2840	msleep(100);
2841
2842	return 0;
2843}
2844
2845static int pci_af_flr(struct pci_dev *dev, int probe)
2846{
2847	int i;
2848	int pos;
2849	u8 cap;
2850	u8 status;
2851
2852	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2853	if (!pos)
2854		return -ENOTTY;
2855
2856	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2857	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2858		return -ENOTTY;
2859
2860	if (probe)
2861		return 0;
2862
2863	/* Wait for the Transaction Pending bit to clear */
2864	for (i = 0; i < 4; i++) {
2865		if (i)
2866			msleep((1 << (i - 1)) * 100);
2867
2868		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2869		if (!(status & PCI_AF_STATUS_TP))
2870			goto clear;
2871	}
2872
2873	dev_err(&dev->dev, "transaction is not cleared; "
2874			"proceeding with reset anyway\n");
2875
2876clear:
2877	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2878	msleep(100);
2879
2880	return 0;
2881}
2882
2883/**
2884 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
2885 * @dev: Device to reset.
2886 * @probe: If set, only check if the device can be reset this way.
2887 *
2888 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
2889 * unset, it will be reinitialized internally when going from PCI_D3hot to
2890 * PCI_D0.  If that's the case and the device is not in a low-power state
2891 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
2892 *
2893 * NOTE: This causes the caller to sleep for twice the device power transition
2894 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
2895 * by default (i.e. unless the @dev's d3_delay field has a different value).
2896 * Moreover, only devices in D0 can be reset by this function.
2897 */
2898static int pci_pm_reset(struct pci_dev *dev, int probe)
2899{
2900	u16 csr;
2901
2902	if (!dev->pm_cap)
2903		return -ENOTTY;
2904
2905	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2906	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2907		return -ENOTTY;
2908
2909	if (probe)
2910		return 0;
2911
2912	if (dev->current_state != PCI_D0)
2913		return -EINVAL;
2914
2915	csr &= ~PCI_PM_CTRL_STATE_MASK;
2916	csr |= PCI_D3hot;
2917	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2918	pci_dev_d3_sleep(dev);
2919
2920	csr &= ~PCI_PM_CTRL_STATE_MASK;
2921	csr |= PCI_D0;
2922	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2923	pci_dev_d3_sleep(dev);
2924
2925	return 0;
2926}
2927
2928static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2929{
2930	u16 ctrl;
2931	struct pci_dev *pdev;
2932
2933	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2934		return -ENOTTY;
2935
2936	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2937		if (pdev != dev)
2938			return -ENOTTY;
2939
2940	if (probe)
2941		return 0;
2942
2943	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2944	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2945	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2946	msleep(100);
2947
2948	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2949	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2950	msleep(100);
2951
2952	return 0;
2953}
2954
2955static int pci_dev_reset(struct pci_dev *dev, int probe)
2956{
2957	int rc;
2958
2959	might_sleep();
2960
2961	if (!probe) {
2962		pci_block_user_cfg_access(dev);
2963		/* block PM suspend, driver probe, etc. */
2964		device_lock(&dev->dev);
2965	}
2966
2967	rc = pci_dev_specific_reset(dev, probe);
2968	if (rc != -ENOTTY)
2969		goto done;
2970
2971	rc = pcie_flr(dev, probe);
2972	if (rc != -ENOTTY)
2973		goto done;
2974
2975	rc = pci_af_flr(dev, probe);
2976	if (rc != -ENOTTY)
2977		goto done;
2978
2979	rc = pci_pm_reset(dev, probe);
2980	if (rc != -ENOTTY)
2981		goto done;
2982
2983	rc = pci_parent_bus_reset(dev, probe);
2984done:
2985	if (!probe) {
2986		device_unlock(&dev->dev);
2987		pci_unblock_user_cfg_access(dev);
2988	}
2989
2990	return rc;
2991}
2992
2993/**
2994 * __pci_reset_function - reset a PCI device function
2995 * @dev: PCI device to reset
2996 *
2997 * Some devices allow an individual function to be reset without affecting
2998 * other functions in the same device.  The PCI device must be responsive
2999 * to PCI config space in order to use this function.
3000 *
3001 * The device function is presumed to be unused when this function is called.
3002 * Resetting the device will make the contents of PCI configuration space
3003 * random, so any caller of this must be prepared to reinitialise the
3004 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3005 * etc.
3006 *
3007 * Returns 0 if the device function was successfully reset or negative if the
3008 * device doesn't support resetting a single function.
3009 */
3010int __pci_reset_function(struct pci_dev *dev)
3011{
3012	return pci_dev_reset(dev, 0);
3013}
3014EXPORT_SYMBOL_GPL(__pci_reset_function);
3015
3016/**
3017 * pci_probe_reset_function - check whether the device can be safely reset
3018 * @dev: PCI device to reset
3019 *
3020 * Some devices allow an individual function to be reset without affecting
3021 * other functions in the same device.  The PCI device must be responsive
3022 * to PCI config space in order to use this function.
3023 *
3024 * Returns 0 if the device function can be reset or negative if the
3025 * device doesn't support resetting a single function.
3026 */
3027int pci_probe_reset_function(struct pci_dev *dev)
3028{
3029	return pci_dev_reset(dev, 1);
3030}
3031
3032/**
3033 * pci_reset_function - quiesce and reset a PCI device function
3034 * @dev: PCI device to reset
3035 *
3036 * Some devices allow an individual function to be reset without affecting
3037 * other functions in the same device.  The PCI device must be responsive
3038 * to PCI config space in order to use this function.
3039 *
3040 * This function does not just reset the PCI portion of a device, but
3041 * clears all the state associated with the device.  This function differs
3042 * from __pci_reset_function in that it saves and restores device state
3043 * over the reset.
3044 *
3045 * Returns 0 if the device function was successfully reset or negative if the
3046 * device doesn't support resetting a single function.
3047 */
3048int pci_reset_function(struct pci_dev *dev)
3049{
3050	int rc;
3051
3052	rc = pci_dev_reset(dev, 1);
3053	if (rc)
3054		return rc;
3055
3056	pci_save_state(dev);
3057
3058	/*
3059	 * both INTx and MSI are disabled after the Interrupt Disable bit
3060	 * is set and the Bus Master bit is cleared.
3061	 */
3062	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3063
3064	rc = pci_dev_reset(dev, 0);
3065
3066	pci_restore_state(dev);
3067
3068	return rc;
3069}
3070EXPORT_SYMBOL_GPL(pci_reset_function);
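
/*
 * Illustrative sketch (hypothetical): probing for a usable reset method
 * before committing to one, e.g. before assigning a function elsewhere.
 */
static int foo_try_reset(struct pci_dev *pdev)
{
	if (pci_probe_reset_function(pdev))
		return -ENODEV;	/* no reset method available */

	return pci_reset_function(pdev);
}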
3071
3072/**
3073 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3074 * @dev: PCI device to query
3075 *
3076 * Returns mmrbc: maximum designed memory read count in bytes
3077 *    or appropriate error value.
3078 */
3079int pcix_get_max_mmrbc(struct pci_dev *dev)
3080{
3081	int cap;
3082	u32 stat;
3083
3084	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3085	if (!cap)
3086		return -EINVAL;
3087
3088	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3089		return -EINVAL;
3090
3091	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3092}
3093EXPORT_SYMBOL(pcix_get_max_mmrbc);
3094
3095/**
3096 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3097 * @dev: PCI device to query
3098 *
3099 * Returns mmrbc: maximum memory read count in bytes
3100 *    or appropriate error value.
3101 */
3102int pcix_get_mmrbc(struct pci_dev *dev)
3103{
3104	int cap;
3105	u16 cmd;
3106
3107	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3108	if (!cap)
3109		return -EINVAL;
3110
3111	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3112		return -EINVAL;
3113
3114	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3115}
3116EXPORT_SYMBOL(pcix_get_mmrbc);
3117
3118/**
3119 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3120 * @dev: PCI device to query
3121 * @mmrbc: maximum memory read count in bytes
3122 *    valid values are 512, 1024, 2048, 4096
3123 *
3124 * If possible sets maximum memory read byte count; some bridges have
3125 * errata that prevent this.
3126 */
3127int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3128{
3129	int cap;
3130	u32 stat, v, o;
3131	u16 cmd;
3132
3133	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3134		return -EINVAL;
3135
3136	v = ffs(mmrbc) - 10;
3137
3138	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3139	if (!cap)
3140		return -EINVAL;
3141
3142	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3143		return -EINVAL;
3144
3145	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3146		return -E2BIG;
3147
3148	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3149		return -EINVAL;
3150
3151	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3152	if (o != v) {
3153		if (v > o && dev->bus &&
3154		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3155			return -EIO;
3156
3157		cmd &= ~PCI_X_CMD_MAX_READ;
3158		cmd |= v << 2;
3159		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3160			return -EIO;
3161	}
3162	return 0;
3163}
3164EXPORT_SYMBOL(pcix_set_mmrbc);
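
/*
 * Illustrative sketch (hypothetical): raising the PCI-X read byte count
 * and tolerating failure; 2048 is an arbitrary example value.
 */
static void foo_tune_mmrbc(struct pci_dev *pdev)
{
	if (pcix_set_mmrbc(pdev, 2048) < 0)
		dev_dbg(&pdev->dev, "keeping default PCI-X mmrbc\n");
}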
3165
3166/**
3167 * pcie_get_readrq - get PCI Express read request size
3168 * @dev: PCI device to query
3169 *
3170 * Returns maximum memory read request in bytes
3171 *    or appropriate error value.
3172 */
3173int pcie_get_readrq(struct pci_dev *dev)
3174{
3175	int ret, cap;
3176	u16 ctl;
3177
3178	cap = pci_pcie_cap(dev);
3179	if (!cap)
3180		return -EINVAL;
3181
3182	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3183	if (!ret)
3184		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3185
3186	return ret;
3187}
3188EXPORT_SYMBOL(pcie_get_readrq);
3189
3190/**
3191 * pcie_set_readrq - set PCI Express maximum memory read request
3192 * @dev: PCI device to query
3193 * @rq: maximum memory read count in bytes
3194 *    valid values are 128, 256, 512, 1024, 2048, 4096
3195 *
3196 * If possible sets maximum memory read request in bytes
3197 */
3198int pcie_set_readrq(struct pci_dev *dev, int rq)
3199{
3200	int cap, err = -EINVAL;
3201	u16 ctl, v;
3202
3203	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3204		goto out;
3205
3206	v = (ffs(rq) - 8) << 12;
3207
3208	cap = pci_pcie_cap(dev);
3209	if (!cap)
3210		goto out;
3211
3212	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3213	if (err)
3214		goto out;
3215
3216	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3217		ctl &= ~PCI_EXP_DEVCTL_READRQ;
3218		ctl |= v;
3219		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3220	}
3221
3222out:
3223	return err;
3224}
3225EXPORT_SYMBOL(pcie_set_readrq);
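
/*
 * Illustrative sketch (hypothetical): clamping the read request size,
 * as some drivers do for devices that misbehave with large MRRS values;
 * 512 is an arbitrary example.
 */
static void foo_clamp_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}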
3226
3227/**
3228 * pcie_get_mps - get PCI Express maximum payload size
3229 * @dev: PCI device to query
3230 *
3231 * Returns maximum payload size in bytes
3232 *    or appropriate error value.
3233 */
3234int pcie_get_mps(struct pci_dev *dev)
3235{
3236	int ret, cap;
3237	u16 ctl;
3238
3239	cap = pci_pcie_cap(dev);
3240	if (!cap)
3241		return -EINVAL;
3242
3243	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3244	if (!ret)
3245		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3246
3247	return ret;
3248}
3249
3250/**
3251 * pcie_set_mps - set PCI Express maximum payload size
3252 * @dev: PCI device to query
3253 * @mps: maximum payload size in bytes
3254 *    valid values are 128, 256, 512, 1024, 2048, 4096
3255 *
3256 * If possible sets maximum payload size
3257 */
3258int pcie_set_mps(struct pci_dev *dev, int mps)
3259{
3260	int cap, err = -EINVAL;
3261	u16 ctl, v;
3262
3263	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3264		goto out;
3265
3266	v = ffs(mps) - 8;
3267	if (v > dev->pcie_mpss)
3268		goto out;
3269	v <<= 5;
3270
3271	cap = pci_pcie_cap(dev);
3272	if (!cap)
3273		goto out;
3274
3275	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3276	if (err)
3277		goto out;
3278
3279	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3280		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3281		ctl |= v;
3282		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3283	}
3284out:
3285	return err;
3286}
3287
3288/**
3289 * pci_select_bars - Make BAR mask from the type of resource
3290 * @dev: the PCI device for which BAR mask is made
3291 * @flags: resource type mask to be selected
3292 *
3293 * This helper routine makes bar mask from the type of resource.
3294 */
3295int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3296{
3297	int i, bars = 0;
3298	for (i = 0; i < PCI_NUM_RESOURCES; i++)
3299		if (pci_resource_flags(dev, i) & flags)
3300			bars |= (1 << i);
3301	return bars;
3302}
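
/*
 * Illustrative sketch (hypothetical): pairing pci_select_bars() with the
 * selected-region helpers above to reserve only the memory BARs; "foo"
 * is a placeholder owner name.
 */
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}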
3303
3304/**
3305 * pci_resource_bar - get position of the BAR associated with a resource
3306 * @dev: the PCI device
3307 * @resno: the resource number
3308 * @type: the BAR type to be filled in
3309 *
3310 * Returns BAR position in config space, or 0 if the BAR is invalid.
3311 */
3312int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3313{
3314	int reg;
3315
3316	if (resno < PCI_ROM_RESOURCE) {
3317		*type = pci_bar_unknown;
3318		return PCI_BASE_ADDRESS_0 + 4 * resno;
3319	} else if (resno == PCI_ROM_RESOURCE) {
3320		*type = pci_bar_mem32;
3321		return dev->rom_base_reg;
3322	} else if (resno < PCI_BRIDGE_RESOURCES) {
3323		/* device specific resource */
3324		reg = pci_iov_resource_bar(dev, resno, type);
3325		if (reg)
3326			return reg;
3327	}
3328
3329	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3330	return 0;
3331}
3332
3333/* Some architectures require additional programming to enable VGA */
3334static arch_set_vga_state_t arch_set_vga_state;
3335
3336void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3337{
3338	arch_set_vga_state = func;	/* NULL disables */
3339}
3340
3341static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3342		      unsigned int command_bits, u32 flags)
3343{
3344	if (arch_set_vga_state)
3345		return arch_set_vga_state(dev, decode, command_bits,
3346						flags);
3347	return 0;
3348}
3349
3350/**
3351 * pci_set_vga_state - set VGA decode state on device and parents if requested
3352 * @dev: the PCI device
3353 * @decode: true = enable decoding, false = disable decoding
3354 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3355 * @flags: traverse ancestors and change bridges
3356 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3357 */
3358int pci_set_vga_state(struct pci_dev *dev, bool decode,
3359		      unsigned int command_bits, u32 flags)
3360{
3361	struct pci_bus *bus;
3362	struct pci_dev *bridge;
3363	u16 cmd;
3364	int rc;
3365
3366	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3367
3368	/* ARCH specific VGA enables */
3369	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3370	if (rc)
3371		return rc;
3372
3373	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3374		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3375		if (decode)
3376			cmd |= command_bits;
3377		else
3378			cmd &= ~command_bits;
3379		pci_write_config_word(dev, PCI_COMMAND, cmd);
3380	}
3381
3382	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3383		return 0;
3384
3385	bus = dev->bus;
3386	while (bus) {
3387		bridge = bus->self;
3388		if (bridge) {
3389			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3390					     &cmd);
3391			if (decode)
3392				cmd |= PCI_BRIDGE_CTL_VGA;
3393			else
3394				cmd &= ~PCI_BRIDGE_CTL_VGA;
3395			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3396					      cmd);
3397		}
3398		bus = bus->parent;
3399	}
3400	return 0;
3401}
3402
3403#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3404static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3405static DEFINE_SPINLOCK(resource_alignment_lock);
3406
3407/**
3408 * pci_specified_resource_alignment - get resource alignment specified by user.
3409 * @dev: the PCI device to get
3410 *
3411 * RETURNS: Resource alignment if it is specified.
3412 *          Zero if it is not specified.
3413 */
3414resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3415{
3416	int seg, bus, slot, func, align_order, count;
3417	resource_size_t align = 0;
3418	char *p;
3419
3420	spin_lock(&resource_alignment_lock);
3421	p = resource_alignment_param;
3422	while (*p) {
3423		count = 0;
3424		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3425							p[count] == '@') {
3426			p += count + 1;
3427		} else {
3428			align_order = -1;
3429		}
3430		if (sscanf(p, "%x:%x:%x.%x%n",
3431			&seg, &bus, &slot, &func, &count) != 4) {
3432			seg = 0;
3433			if (sscanf(p, "%x:%x.%x%n",
3434					&bus, &slot, &func, &count) != 3) {
3435				/* Invalid format */
3436				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3437					p);
3438				break;
3439			}
3440		}
3441		p += count;
3442		if (seg == pci_domain_nr(dev->bus) &&
3443			bus == dev->bus->number &&
3444			slot == PCI_SLOT(dev->devfn) &&
3445			func == PCI_FUNC(dev->devfn)) {
3446			if (align_order == -1) {
3447				align = PAGE_SIZE;
3448			} else {
3449				align = 1 << align_order;
3450			}
3451			/* Found */
3452			break;
3453		}
3454		if (*p != ';' && *p != ',') {
3455			/* End of param or invalid format */
3456			break;
3457		}
3458		p++;
3459	}
3460	spin_unlock(&resource_alignment_lock);
3461	return align;
3462}
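
/*
 * Example of the parameter format parsed above (device addresses are
 * hypothetical): "pci=resource_alignment=20@0000:00:1c.0;04:00.0"
 * requests 2^20-byte alignment for 0000:00:1c.0 and, with no explicit
 * order, PAGE_SIZE alignment for 04:00.0.
 */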
3463
3464/**
3465 * pci_is_reassigndev - check if specified PCI is target device to reassign
3466 * @dev: the PCI device to check
3467 *
3468 * RETURNS: non-zero if the PCI device is a target device to reassign,
3469 *          zero if it is not.
3470 */
3471int pci_is_reassigndev(struct pci_dev *dev)
3472{
3473	return (pci_specified_resource_alignment(dev) != 0);
3474}
3475
3476ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3477{
3478	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3479		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3480	spin_lock(&resource_alignment_lock);
3481	strncpy(resource_alignment_param, buf, count);
3482	resource_alignment_param[count] = '\0';
3483	spin_unlock(&resource_alignment_lock);
3484	return count;
3485}
3486
3487ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3488{
3489	size_t count;
3490	spin_lock(&resource_alignment_lock);
3491	count = snprintf(buf, size, "%s", resource_alignment_param);
3492	spin_unlock(&resource_alignment_lock);
3493	return count;
3494}
3495
3496static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3497{
3498	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3499}
3500
3501static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3502					const char *buf, size_t count)
3503{
3504	return pci_set_resource_alignment_param(buf, count);
3505}
3506
3507BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3508					pci_resource_alignment_store);
3509
3510static int __init pci_resource_alignment_sysfs_init(void)
3511{
3512	return bus_create_file(&pci_bus_type,
3513					&bus_attr_resource_alignment);
3514}
3515
3516late_initcall(pci_resource_alignment_sysfs_init);
3517
3518static void __devinit pci_no_domains(void)
3519{
3520#ifdef CONFIG_PCI_DOMAINS
3521	pci_domains_supported = 0;
3522#endif
3523}
3524
3525/**
3526 * pci_ext_cfg_avail - can we access extended PCI config space?
3527 * @dev: The PCI device of the root bridge.
3528 *
3529 * Returns 1 if we can access PCI extended config space (offsets
3530 * greater than 0xff). This is the default implementation. Architecture
3531 * implementations can override this.
3532 */
3533int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3534{
3535	return 1;
3536}
3537
3538void __weak pci_fixup_cardbus(struct pci_bus *bus)
3539{
3540}
3541EXPORT_SYMBOL(pci_fixup_cardbus);
3542
3543static int __init pci_setup(char *str)
3544{
3545	while (str) {
3546		char *k = strchr(str, ',');
3547		if (k)
3548			*k++ = 0;
3549		if (*str && (str = pcibios_setup(str)) && *str) {
3550			if (!strcmp(str, "nomsi")) {
3551				pci_no_msi();
3552			} else if (!strcmp(str, "noaer")) {
3553				pci_no_aer();
3554			} else if (!strncmp(str, "realloc", 7)) {
3555				pci_realloc();
3556			} else if (!strcmp(str, "nodomains")) {
3557				pci_no_domains();
3558			} else if (!strncmp(str, "cbiosize=", 9)) {
3559				pci_cardbus_io_size = memparse(str + 9, &str);
3560			} else if (!strncmp(str, "cbmemsize=", 10)) {
3561				pci_cardbus_mem_size = memparse(str + 10, &str);
3562			} else if (!strncmp(str, "resource_alignment=", 19)) {
3563				pci_set_resource_alignment_param(str + 19,
3564							strlen(str + 19));
3565			} else if (!strncmp(str, "ecrc=", 5)) {
3566				pcie_ecrc_get_policy(str + 5);
3567			} else if (!strncmp(str, "hpiosize=", 9)) {
3568				pci_hotplug_io_size = memparse(str + 9, &str);
3569			} else if (!strncmp(str, "hpmemsize=", 10)) {
3570				pci_hotplug_mem_size = memparse(str + 10, &str);
3571			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3572				pcie_bus_config = PCIE_BUS_TUNE_OFF;
3573			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
3574				pcie_bus_config = PCIE_BUS_SAFE;
3575			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
3576				pcie_bus_config = PCIE_BUS_PERFORMANCE;
3577			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3578				pcie_bus_config = PCIE_BUS_PEER2PEER;
3579			} else {
3580				printk(KERN_ERR "PCI: Unknown option `%s'\n",
3581						str);
3582			}
3583		}
3584		str = k;
3585	}
3586	return 0;
3587}
3588early_param("pci", pci_setup);
3589
3590EXPORT_SYMBOL(pci_reenable_device);
3591EXPORT_SYMBOL(pci_enable_device_io);
3592EXPORT_SYMBOL(pci_enable_device_mem);
3593EXPORT_SYMBOL(pci_enable_device);
3594EXPORT_SYMBOL(pcim_enable_device);
3595EXPORT_SYMBOL(pcim_pin_device);
3596EXPORT_SYMBOL(pci_disable_device);
3597EXPORT_SYMBOL(pci_find_capability);
3598EXPORT_SYMBOL(pci_bus_find_capability);
3599EXPORT_SYMBOL(pci_release_regions);
3600EXPORT_SYMBOL(pci_request_regions);
3601EXPORT_SYMBOL(pci_request_regions_exclusive);
3602EXPORT_SYMBOL(pci_release_region);
3603EXPORT_SYMBOL(pci_request_region);
3604EXPORT_SYMBOL(pci_request_region_exclusive);
3605EXPORT_SYMBOL(pci_release_selected_regions);
3606EXPORT_SYMBOL(pci_request_selected_regions);
3607EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3608EXPORT_SYMBOL(pci_set_master);
3609EXPORT_SYMBOL(pci_clear_master);
3610EXPORT_SYMBOL(pci_set_mwi);
3611EXPORT_SYMBOL(pci_try_set_mwi);
3612EXPORT_SYMBOL(pci_clear_mwi);
3613EXPORT_SYMBOL_GPL(pci_intx);
3614EXPORT_SYMBOL(pci_assign_resource);
3615EXPORT_SYMBOL(pci_find_parent_resource);
3616EXPORT_SYMBOL(pci_select_bars);
3617
3618EXPORT_SYMBOL(pci_set_power_state);
3619EXPORT_SYMBOL(pci_save_state);
3620EXPORT_SYMBOL(pci_restore_state);
3621EXPORT_SYMBOL(pci_pme_capable);
3622EXPORT_SYMBOL(pci_pme_active);
3623EXPORT_SYMBOL(pci_wake_from_d3);
3624EXPORT_SYMBOL(pci_target_state);
3625EXPORT_SYMBOL(pci_prepare_to_sleep);
3626EXPORT_SYMBOL(pci_back_from_sleep);
3627EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
v4.6
   1/*
   2 *	PCI Bus Services, see include/linux/pci.h for further explanation.
   3 *
   4 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   5 *	David Mosberger-Tang
   6 *
   7 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
  12#include <linux/init.h>
  13#include <linux/of.h>
  14#include <linux/of_pci.h>
  15#include <linux/pci.h>
  16#include <linux/pm.h>
  17#include <linux/slab.h>
  18#include <linux/module.h>
  19#include <linux/spinlock.h>
  20#include <linux/string.h>
  21#include <linux/log2.h>
  22#include <linux/pci-aspm.h>
  23#include <linux/pm_wakeup.h>
  24#include <linux/interrupt.h>
  25#include <linux/device.h>
  26#include <linux/pm_runtime.h>
  27#include <linux/pci_hotplug.h>
  28#include <asm/setup.h>
  29#include <linux/aer.h>
  30#include "pci.h"
  31
  32const char *pci_power_names[] = {
  33	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  34};
  35EXPORT_SYMBOL_GPL(pci_power_names);
  36
  37int isa_dma_bridge_buggy;
  38EXPORT_SYMBOL(isa_dma_bridge_buggy);
  39
  40int pci_pci_problems;
  41EXPORT_SYMBOL(pci_pci_problems);
  42
  43unsigned int pci_pm_d3_delay;
  44
  45static void pci_pme_list_scan(struct work_struct *work);
  46
  47static LIST_HEAD(pci_pme_list);
  48static DEFINE_MUTEX(pci_pme_list_mutex);
  49static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  50
  51struct pci_pme_device {
  52	struct list_head list;
  53	struct pci_dev *dev;
  54};
  55
  56#define PME_TIMEOUT 1000 /* How long between PME checks */
  57
  58static void pci_dev_d3_sleep(struct pci_dev *dev)
  59{
  60	unsigned int delay = dev->d3_delay;
  61
  62	if (delay < pci_pm_d3_delay)
  63		delay = pci_pm_d3_delay;
  64
  65	msleep(delay);
  66}
  67
  68#ifdef CONFIG_PCI_DOMAINS
  69int pci_domains_supported = 1;
  70#endif
  71
  72#define DEFAULT_CARDBUS_IO_SIZE		(256)
  73#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  74/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  75unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  76unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  77
  78#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  79#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
  80/* pci=hpmemsize=nnM,hpiosize=nn can override this */
  81unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  82unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
  83
  84enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
  85
  86/*
  87 * The default CLS is used if arch didn't set CLS explicitly and not
  88 * all pci devices agree on the same value.  Arch can override either
  89 * the dfl or actual value as it sees fit.  Don't forget this is
  90 * measured in 32-bit words, not bytes.
  91 */
  92u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
  93u8 pci_cache_line_size;
  94
  95/*
  96 * If we set up a device for bus mastering, we need to check the latency
  97 * timer as certain BIOSes forget to set it properly.
  98 */
  99unsigned int pcibios_max_latency = 255;
 100
 101/* If set, the PCIe ARI capability will not be used. */
 102static bool pcie_ari_disabled;
 103
 104/**
 105 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 106 * @bus: pointer to PCI bus structure to search
 107 *
 108 * Given a PCI bus, returns the highest PCI bus number present in the set
 109 * including the given PCI bus and its list of child PCI buses.
 110 */
 111unsigned char pci_bus_max_busnr(struct pci_bus *bus)
 112{
 113	struct pci_bus *tmp;
 114	unsigned char max, n;
 115
 116	max = bus->busn_res.end;
 117	list_for_each_entry(tmp, &bus->children, node) {
 118		n = pci_bus_max_busnr(tmp);
 119		if (n > max)
 120			max = n;
 121	}
 122	return max;
 123}
 124EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 125
 126#ifdef CONFIG_HAS_IOMEM
 127void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 128{
 129	struct resource *res = &pdev->resource[bar];
 130
 131	/*
 132	 * Make sure the BAR is actually a memory resource, not an IO resource
 133	 */
 134	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
 135		dev_warn(&pdev->dev, "can't ioremap BAR %d: %pR\n", bar, res);
 136		return NULL;
 137	}
 138	return ioremap_nocache(res->start, resource_size(res));
 139}
 140EXPORT_SYMBOL_GPL(pci_ioremap_bar);
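/*
 * Usage sketch (illustrative, not part of pci.c): a driver might map a
 * BAR like this from its probe() path.  The BAR index and the demo_
 * naming are assumptions for the example.
 */
static void __iomem *demo_map_bar0(struct pci_dev *pdev)
{
	void __iomem *regs = pci_ioremap_bar(pdev, 0);	/* assumes BAR 0 is MMIO */

	if (!regs)
		dev_err(&pdev->dev, "cannot map BAR 0\n");
	return regs;	/* caller unmaps with iounmap() when done */
}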
 141
 142void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
 143{
 144	/*
 145	 * Make sure the BAR is actually a memory resource, not an IO resource
 146	 */
 147	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 148		WARN_ON(1);
 149		return NULL;
 150	}
 151	return ioremap_wc(pci_resource_start(pdev, bar),
 152			  pci_resource_len(pdev, bar));
 153}
 154EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
 155#endif
 156
 157
 158static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 159				   u8 pos, int cap, int *ttl)
 160{
 161	u8 id;
 162	u16 ent;
 163
 164	pci_bus_read_config_byte(bus, devfn, pos, &pos);
 165
 166	while ((*ttl)--) {
 167		if (pos < 0x40)
 168			break;
 169		pos &= ~3;
 170		pci_bus_read_config_word(bus, devfn, pos, &ent);
 171
 172		id = ent & 0xff;
 173		if (id == 0xff)
 174			break;
 175		if (id == cap)
 176			return pos;
 177		pos = (ent >> 8);
 178	}
 179	return 0;
 180}
 181
 182static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 183			       u8 pos, int cap)
 184{
 185	int ttl = PCI_FIND_CAP_TTL;
 186
 187	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 188}
 189
 190int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 191{
 192	return __pci_find_next_cap(dev->bus, dev->devfn,
 193				   pos + PCI_CAP_LIST_NEXT, cap);
 194}
 195EXPORT_SYMBOL_GPL(pci_find_next_capability);
 196
 197static int __pci_bus_find_cap_start(struct pci_bus *bus,
 198				    unsigned int devfn, u8 hdr_type)
 199{
 200	u16 status;
 201
 202	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 203	if (!(status & PCI_STATUS_CAP_LIST))
 204		return 0;
 205
 206	switch (hdr_type) {
 207	case PCI_HEADER_TYPE_NORMAL:
 208	case PCI_HEADER_TYPE_BRIDGE:
 209		return PCI_CAPABILITY_LIST;
 210	case PCI_HEADER_TYPE_CARDBUS:
 211		return PCI_CB_CAPABILITY_LIST;
 212	}
 213
 214	return 0;
 215}
 216
 217/**
 218 * pci_find_capability - query for devices' capabilities
 219 * @dev: PCI device to query
 220 * @cap: capability code
 221 *
 222 * Tell if a device supports a given PCI capability.
 223 * Returns the address of the requested capability structure within the
 224 * device's PCI configuration space or 0 in case the device does not
 225 * support it.  Possible values for @cap:
 226 *
 227 *  %PCI_CAP_ID_PM           Power Management
 228 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 229 *  %PCI_CAP_ID_VPD          Vital Product Data
 230 *  %PCI_CAP_ID_SLOTID       Slot Identification
 231 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 232 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 233 *  %PCI_CAP_ID_PCIX         PCI-X
 234 *  %PCI_CAP_ID_EXP          PCI Express
 235 */
 236int pci_find_capability(struct pci_dev *dev, int cap)
 237{
 238	int pos;
 239
 240	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 241	if (pos)
 242		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 243
 244	return pos;
 245}
 246EXPORT_SYMBOL(pci_find_capability);
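/*
 * Usage sketch (illustrative, not part of pci.c): locate the Power
 * Management capability and read its PMC register; "pdev" is an
 * assumed, already-enabled device.
 */
static u16 demo_read_pmc(struct pci_dev *pdev)
{
	u16 pmc = 0;
	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);

	if (pm)
		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
	return pmc;
}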
 247
 248/**
 249 * pci_bus_find_capability - query for devices' capabilities
 250 * @bus:   the PCI bus to query
 251 * @devfn: PCI device to query
 252 * @cap:   capability code
 253 *
 254 * Like pci_find_capability() but works for pci devices that do not have a
 255 * pci_dev structure set up yet.
 256 *
 257 * Returns the address of the requested capability structure within the
 258 * device's PCI configuration space or 0 in case the device does not
 259 * support it.
 260 */
 261int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 262{
 263	int pos;
 264	u8 hdr_type;
 265
 266	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 267
 268	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 269	if (pos)
 270		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 271
 272	return pos;
 273}
 274EXPORT_SYMBOL(pci_bus_find_capability);
 275
 276/**
 277 * pci_find_next_ext_capability - Find an extended capability
 278 * @dev: PCI device to query
 279 * @start: address at which to start looking (0 to start at beginning of list)
 280 * @cap: capability code
 281 *
 282 * Returns the address of the next matching extended capability structure
 283 * within the device's PCI configuration space or 0 if the device does
 284 * not support it.  Some capabilities can occur several times, e.g., the
 285 * vendor-specific capability, and this provides a way to find them all.
 286 */
 287int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
 288{
 289	u32 header;
 290	int ttl;
 291	int pos = PCI_CFG_SPACE_SIZE;
 292
 293	/* minimum 8 bytes per capability */
 294	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 295
 296	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 297		return 0;
 298
 299	if (start)
 300		pos = start;
 301
 302	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 303		return 0;
 304
 305	/*
 306	 * If we have no capabilities, this is indicated by cap ID,
 307	 * cap version and next pointer all being 0.
 308	 */
 309	if (header == 0)
 310		return 0;
 311
 312	while (ttl-- > 0) {
 313		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
 314			return pos;
 315
 316		pos = PCI_EXT_CAP_NEXT(header);
 317		if (pos < PCI_CFG_SPACE_SIZE)
 318			break;
 319
 320		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 321			break;
 322	}
 323
 324	return 0;
 325}
 326EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
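/*
 * Usage sketch (illustrative): walking every Vendor-Specific Extended
 * Capability, the repeated-capability case this function exists for.
 */
static void demo_walk_vsec(struct pci_dev *pdev)
{
	int pos = 0;

	while ((pos = pci_find_next_ext_capability(pdev, pos,
						   PCI_EXT_CAP_ID_VNDR))) {
		u32 hdr;

		pci_read_config_dword(pdev, pos + PCI_VNDR_HEADER, &hdr);
		/* dispatch on PCI_VNDR_HEADER_ID(hdr) as needed */
	}
}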
 327
 328/**
 329 * pci_find_ext_capability - Find an extended capability
 330 * @dev: PCI device to query
 331 * @cap: capability code
 332 *
 333 * Returns the address of the requested extended capability structure
 334 * within the device's PCI configuration space or 0 if the device does
 335 * not support it.  Possible values for @cap:
 336 *
 337 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 338 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 339 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 340 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 341 */
 342int pci_find_ext_capability(struct pci_dev *dev, int cap)
 343{
 344	return pci_find_next_ext_capability(dev, 0, cap);
 345}
 346EXPORT_SYMBOL_GPL(pci_find_ext_capability);
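/*
 * Usage sketch (illustrative): a one-shot query, e.g. checking whether
 * the device exposes Advanced Error Reporting.
 */
static bool demo_has_aer(struct pci_dev *pdev)
{
	return pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR) != 0;
}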
 347
 348static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 349{
 350	int rc, ttl = PCI_FIND_CAP_TTL;
 351	u8 cap, mask;
 352
 353	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 354		mask = HT_3BIT_CAP_MASK;
 355	else
 356		mask = HT_5BIT_CAP_MASK;
 357
 358	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 359				      PCI_CAP_ID_HT, &ttl);
 360	while (pos) {
 361		rc = pci_read_config_byte(dev, pos + 3, &cap);
 362		if (rc != PCIBIOS_SUCCESSFUL)
 363			return 0;
 364
 365		if ((cap & mask) == ht_cap)
 366			return pos;
 367
 368		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 369					      pos + PCI_CAP_LIST_NEXT,
 370					      PCI_CAP_ID_HT, &ttl);
 371	}
 372
 373	return 0;
 374}
 375/**
 376 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 377 * @dev: PCI device to query
 378 * @pos: Position from which to continue searching
 379 * @ht_cap: Hypertransport capability code
 380 *
 381 * To be used in conjunction with pci_find_ht_capability() to search for
 382 * all capabilities matching @ht_cap. @pos should always be a value returned
 383 * from pci_find_ht_capability().
 384 *
 385 * NB. To be 100% safe against broken PCI devices, the caller should take
 386 * steps to avoid an infinite loop.
 387 */
 388int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 389{
 390	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 391}
 392EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 393
 394/**
 395 * pci_find_ht_capability - query a device's Hypertransport capabilities
 396 * @dev: PCI device to query
 397 * @ht_cap: Hypertransport capability code
 398 *
 399 * Tell if a device supports a given Hypertransport capability.
 400 * Returns an address within the device's PCI configuration space
 401 * or 0 in case the device does not support the requested capability.
 402 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 403 * which has a Hypertransport capability matching @ht_cap.
 404 */
 405int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 406{
 407	int pos;
 408
 409	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 410	if (pos)
 411		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 412
 413	return pos;
 414}
 415EXPORT_SYMBOL_GPL(pci_find_ht_capability);
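/*
 * Usage sketch (illustrative): a bounded walk over matching HT
 * capabilities, honouring the "avoid an infinite loop" note above.
 * The HT_CAPTYPE_MSI_MAPPING choice is an assumption for the example.
 */
static void demo_walk_ht_msi(struct pci_dev *pdev)
{
	int guard = PCI_FIND_CAP_TTL;
	int pos = pci_find_ht_capability(pdev, HT_CAPTYPE_MSI_MAPPING);

	while (pos && guard--) {
		/* operate on the capability at "pos" here */
		pos = pci_find_next_ht_capability(pdev, pos,
						  HT_CAPTYPE_MSI_MAPPING);
	}
}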
 416
 417/**
 418 * pci_find_parent_resource - return resource region of parent bus of given region
 419 * @dev: PCI device structure contains resources to be searched
 420 * @res: child resource record for which parent is sought
 421 *
 422 *  For a given resource region of a given device, return the resource
 423 *  region of the parent bus that contains the given region.
 424 */
 425struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 426					  struct resource *res)
 427{
 428	const struct pci_bus *bus = dev->bus;
 429	struct resource *r;
 430	int i;
 431
 432	pci_bus_for_each_resource(bus, r, i) {
 433		if (!r)
 434			continue;
 435		if (res->start && resource_contains(r, res)) {
 436
 437			/*
 438			 * If the window is prefetchable but the BAR is
 439			 * not, the allocator made a mistake.
 440			 */
 441			if (r->flags & IORESOURCE_PREFETCH &&
 442			    !(res->flags & IORESOURCE_PREFETCH))
 443				return NULL;
 444
 445			/*
 446			 * If we're below a transparent bridge, there may
 447			 * be both a positively-decoded aperture and a
 448			 * subtractively-decoded region that contain the BAR.
 449			 * We want the positively-decoded one, so this depends
 450			 * on pci_bus_for_each_resource() giving us those
 451			 * first.
 452			 */
 453			return r;
 454		}
 455	}
 456	return NULL;
 457}
 458EXPORT_SYMBOL(pci_find_parent_resource);
 459
 460/**
 461 * pci_find_pcie_root_port - return PCIe Root Port
 462 * @dev: PCI device to query
 463 *
 464 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 465 * for a given PCI Device.
 466 */
 467struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
 468{
 469	struct pci_dev *bridge, *highest_pcie_bridge = NULL;
 470
 471	bridge = pci_upstream_bridge(dev);
 472	while (bridge && pci_is_pcie(bridge)) {
 473		highest_pcie_bridge = bridge;
 474		bridge = pci_upstream_bridge(bridge);
 475	}
 476
 477	if (!highest_pcie_bridge ||
	    pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
 478		return NULL;
 479
 480	return highest_pcie_bridge;
 481}
 482EXPORT_SYMBOL(pci_find_pcie_root_port);
 483
 484/**
 485 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 486 * @dev: the PCI device to operate on
 487 * @pos: config space offset of status word
 488 * @mask: mask of bit(s) to care about in status word
 489 *
 490 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 491 */
 492int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
 493{
 494	int i;
 495
 496	/* Wait for Transaction Pending bit to clear */
 497	for (i = 0; i < 4; i++) {
 498		u16 status;
 499		if (i)
 500			msleep((1 << (i - 1)) * 100);
 501
 502		pci_read_config_word(dev, pos, &status);
 503		if (!(status & mask))
 504			return 1;
 505	}
 506
 507	return 0;
 508}
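/*
 * Usage sketch (illustrative): how a PCIe reset path can wait for the
 * Transaction Pending bit, mirroring what in-tree callers do with the
 * standard Device Status register.
 */
static void demo_wait_for_transactions(struct pci_dev *dev)
{
	if (pci_is_pcie(dev) &&
	    !pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
				  PCI_EXP_DEVSTA_TRPND))
		dev_err(&dev->dev, "transactions still pending\n");
}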
 509
 510/**
 511 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 512 * @dev: PCI device to have its BARs restored
 513 *
 514 * Restore the BAR values for a given device, so as to make it
 515 * accessible by its driver.
 516 */
 517static void pci_restore_bars(struct pci_dev *dev)
 518{
 519	int i;
 520
 521	/* Per SR-IOV spec 3.4.1.11, VF BARs are RO zero */
 522	if (dev->is_virtfn)
 523		return;
 524
 525	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 526		pci_update_resource(dev, i);
 527}
 528
 529static const struct pci_platform_pm_ops *pci_platform_pm;
 530
 531int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
 532{
 533	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
 534	    || !ops->sleep_wake)
 535		return -EINVAL;
 536	pci_platform_pm = ops;
 537	return 0;
 538}
 539
 540static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 541{
 542	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 543}
 544
 545static inline int platform_pci_set_power_state(struct pci_dev *dev,
 546					       pci_power_t t)
 547{
 548	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 549}
 550
 551static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 552{
 553	return pci_platform_pm ?
 554			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 555}
 556
 557static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 558{
 559	return pci_platform_pm ?
 560			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 561}
 562
 563static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
 564{
 565	return pci_platform_pm ?
 566			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 567}
 568
 569static inline bool platform_pci_need_resume(struct pci_dev *dev)
 570{
 571	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
 572}
 573
 574/**
 575 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 576 *                           given PCI device
 577 * @dev: PCI device to handle.
 578 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 579 *
 580 * RETURN VALUE:
 581 * -EINVAL if the requested state is invalid.
 582 * -EIO if device does not support PCI PM or its PM capabilities register has a
 583 * wrong version, or device doesn't support the requested state.
 584 * 0 if the device is already in the requested state.
 585 * 0 if device's power state has been successfully changed.
 586 */
 587static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 588{
 589	u16 pmcsr;
 590	bool need_restore = false;
 591
 592	/* Check if we're already there */
 593	if (dev->current_state == state)
 594		return 0;
 595
 596	if (!dev->pm_cap)
 597		return -EIO;
 598
 599	if (state < PCI_D0 || state > PCI_D3hot)
 600		return -EINVAL;
 601
 602	/* Validate current state:
 603	 * We can enter D0 from any state, but we can only go deeper
 604	 * into sleep if we're already in a low power state.
 605	 */
 606	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 607	    && dev->current_state > state) {
 608		dev_err(&dev->dev, "invalid power transition (from state %d to %d)\n",
 609			dev->current_state, state);
 610		return -EINVAL;
 611	}
 612
 613	/* check if this device supports the desired state */
 614	if ((state == PCI_D1 && !dev->d1_support)
 615	   || (state == PCI_D2 && !dev->d2_support))
 616		return -EIO;
 617
 618	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 619
 620	/* If we're (effectively) in D3, force entire word to 0.
 621	 * This doesn't affect PME_Status, disables PME_En, and
 622	 * sets PowerState to 0.
 623	 */
 624	switch (dev->current_state) {
 625	case PCI_D0:
 626	case PCI_D1:
 627	case PCI_D2:
 628		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 629		pmcsr |= state;
 630		break;
 631	case PCI_D3hot:
 632	case PCI_D3cold:
 633	case PCI_UNKNOWN: /* Boot-up */
 634		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 635		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 636			need_restore = true;
 637		/* Fall-through: force to D0 */
 638	default:
 639		pmcsr = 0;
 640		break;
 641	}
 642
 643	/* enter specified state */
 644	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 645
 646	/* Mandatory power management transition delays */
 647	/* see PCI PM 1.1 5.6.1 table 18 */
 648	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 649		pci_dev_d3_sleep(dev);
 650	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 651		udelay(PCI_PM_D2_DELAY);
 652
 653	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 654	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 655	if (dev->current_state != state && printk_ratelimit())
 656		dev_info(&dev->dev, "Refused to change power state, currently in D%d\n",
 657			 dev->current_state);
 658
 659	/*
 660	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 661	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 662	 * from D3hot to D0 _may_ perform an internal reset, thereby
 663	 * going to "D0 Uninitialized" rather than "D0 Initialized".
 664	 * For example, at least some versions of the 3c905B and the
 665	 * 3c556B exhibit this behaviour.
 666	 *
 667	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
 668	 * devices in a D3hot state at boot.  Consequently, we need to
 669	 * restore at least the BARs so that the device will be
 670	 * accessible to its driver.
 671	 */
 672	if (need_restore)
 673		pci_restore_bars(dev);
 674
 675	if (dev->bus->self)
 676		pcie_aspm_pm_state_change(dev->bus->self);
 677
 678	return 0;
 679}
 680
 681/**
 682 * pci_update_current_state - Read PCI power state of given device from its
 683 *                            PCI PM registers and cache it
 684 * @dev: PCI device to handle.
 685 * @state: State to cache in case the device doesn't have the PM capability
 686 */
 687void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 688{
 689	if (dev->pm_cap) {
 690		u16 pmcsr;
 691
 692		/*
 693		 * Configuration space is not accessible for a device in
 694		 * D3cold, so just keep or set D3cold for safety
 695		 */
 696		if (dev->current_state == PCI_D3cold)
 697			return;
 698		if (state == PCI_D3cold) {
 699			dev->current_state = PCI_D3cold;
 700			return;
 701		}
 702		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 703		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 704	} else {
 705		dev->current_state = state;
 706	}
 707}
 708
 709/**
 710 * pci_power_up - Put the given device into D0 forcibly
 711 * @dev: PCI device to power up
 712 */
 713void pci_power_up(struct pci_dev *dev)
 714{
 715	if (platform_pci_power_manageable(dev))
 716		platform_pci_set_power_state(dev, PCI_D0);
 717
 718	pci_raw_set_power_state(dev, PCI_D0);
 719	pci_update_current_state(dev, PCI_D0);
 720}
 721
 722/**
 723 * pci_platform_power_transition - Use platform to change device power state
 724 * @dev: PCI device to handle.
 725 * @state: State to put the device into.
 726 */
 727static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 728{
 729	int error;
 730
 731	if (platform_pci_power_manageable(dev)) {
 732		error = platform_pci_set_power_state(dev, state);
 733		if (!error)
 734			pci_update_current_state(dev, state);
 735	} else
 736		error = -ENODEV;
 737
 738	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
 739		dev->current_state = PCI_D0;
 740
 741	return error;
 742}
 743
 744/**
 745 * pci_wakeup - Wake up a PCI device
 746 * @pci_dev: Device to handle.
 747 * @ign: ignored parameter
 748 */
 749static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
 750{
 751	pci_wakeup_event(pci_dev);
 752	pm_request_resume(&pci_dev->dev);
 753	return 0;
 754}
 755
 756/**
 757 * pci_wakeup_bus - Walk given bus and wake up devices on it
 758 * @bus: Top bus of the subtree to walk.
 759 */
 760static void pci_wakeup_bus(struct pci_bus *bus)
 761{
 762	if (bus)
 763		pci_walk_bus(bus, pci_wakeup, NULL);
 764}
 765
 766/**
 767 * __pci_start_power_transition - Start power transition of a PCI device
 768 * @dev: PCI device to handle.
 769 * @state: State to put the device into.
 770 */
 771static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 772{
 773	if (state == PCI_D0) {
 774		pci_platform_power_transition(dev, PCI_D0);
 775		/*
 776		 * Mandatory power management transition delays, see
 777		 * PCI Express Base Specification Revision 2.0 Section
 778		 * 6.6.1: Conventional Reset.  Do not delay for
 779		 * devices powered on/off by the corresponding bridge,
 780		 * because we have already delayed for the bridge.
 781		 */
 782		if (dev->runtime_d3cold) {
 783			msleep(dev->d3cold_delay);
 784			/*
 785			 * When powering on a bridge from D3cold, the
 786			 * whole hierarchy may be powered on into
 787			 * the D0uninitialized state; resume the devices
 788			 * below to give them a chance to suspend again
 789			 */
 790			pci_wakeup_bus(dev->subordinate);
 791		}
 792	}
 793}
 794
 795/**
 796 * __pci_dev_set_current_state - Set current state of a PCI device
 797 * @dev: Device to handle
 798 * @data: pointer to state to be set
 799 */
 800static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
 801{
 802	pci_power_t state = *(pci_power_t *)data;
 803
 804	dev->current_state = state;
 805	return 0;
 806}
 807
 808/**
 809 * __pci_bus_set_current_state - Walk given bus and set current state of devices
 810 * @bus: Top bus of the subtree to walk.
 811 * @state: state to be set
 812 */
 813static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
 814{
 815	if (bus)
 816		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
 817}
 818
 819/**
 820 * __pci_complete_power_transition - Complete power transition of a PCI device
 821 * @dev: PCI device to handle.
 822 * @state: State to put the device into.
 823 *
 824 * This function should not be called directly by device drivers.
 825 */
 826int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 827{
 828	int ret;
 829
 830	if (state <= PCI_D0)
 831		return -EINVAL;
 832	ret = pci_platform_power_transition(dev, state);
 833	/* Powering off the bridge may power off the whole hierarchy */
 834	if (!ret && state == PCI_D3cold)
 835		__pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
 836	return ret;
 837}
 838EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 839
 840/**
 841 * pci_set_power_state - Set the power state of a PCI device
 842 * @dev: PCI device to handle.
 843 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 844 *
 845 * Transition a device to a new power state, using the platform firmware and/or
 846 * the device's PCI PM registers.
 847 *
 848 * RETURN VALUE:
 849 * -EINVAL if the requested state is invalid.
 850 * -EIO if device does not support PCI PM or its PM capabilities register has a
 851 * wrong version, or device doesn't support the requested state.
 852 * 0 if the device is already in the requested state.
 853 * 0 if device's power state has been successfully changed.
 854 */
 855int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 856{
 857	int error;
 858
 859	/* bound the state we're entering */
 860	if (state > PCI_D3cold)
 861		state = PCI_D3cold;
 862	else if (state < PCI_D0)
 863		state = PCI_D0;
 864	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
 865		/*
 866		 * If the device or the parent bridge does not support PCI PM,
 867		 * ignore the request if we're doing anything other than putting
 868		 * it into D0 (which would only happen on boot).
 869		 */
 870		return 0;
 871
 872	/* Check if we're already there */
 873	if (dev->current_state == state)
 874		return 0;
 875
 876	__pci_start_power_transition(dev, state);
 877
 878	/* This device is quirked not to be put into D3, so
 879	 * don't put it in D3 */
 880	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 881		return 0;
 882
 883	/*
 884	 * To put the device in D3cold, we put it into D3hot in the native
 885	 * way, then put it into D3cold with platform ops
 886	 */
 887	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
 888					PCI_D3hot : state);
 889
 890	if (!__pci_complete_power_transition(dev, state))
 891		error = 0;
 892
 893	return error;
 894}
 895EXPORT_SYMBOL(pci_set_power_state);
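/*
 * Usage sketch (illustrative): parking an idle device in D3hot and
 * bringing it back to D0 later; error handling omitted for brevity.
 */
static void demo_park_device(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D3hot);
	/* ... device sits idle ... */
	pci_set_power_state(pdev, PCI_D0);
}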
 896
 897/**
 898 * pci_choose_state - Choose the power state of a PCI device
 899 * @dev: PCI device to be suspended
 900 * @state: target sleep state for the whole system. This is the value
 901 *	that is passed to the suspend() function.
 902 *
 903 * Returns PCI power state suitable for given device and given system
 904 * message.
 905 */
 906
 907pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 908{
 909	pci_power_t ret;
 910
 911	if (!dev->pm_cap)
 912		return PCI_D0;
 913
 914	ret = platform_pci_choose_state(dev);
 915	if (ret != PCI_POWER_ERROR)
 916		return ret;
 917
 918	switch (state.event) {
 919	case PM_EVENT_ON:
 920		return PCI_D0;
 921	case PM_EVENT_FREEZE:
 922	case PM_EVENT_PRETHAW:
 923		/* REVISIT both freeze and pre-thaw "should" use D0 */
 924	case PM_EVENT_SUSPEND:
 925	case PM_EVENT_HIBERNATE:
 926		return PCI_D3hot;
 927	default:
 928		dev_info(&dev->dev, "unrecognized suspend event %d\n",
 929			 state.event);
 930		BUG();
 931	}
 932	return PCI_D0;
 933}
 934EXPORT_SYMBOL(pci_choose_state);
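/*
 * Usage sketch (illustrative): the classic legacy .suspend() sequence,
 * letting the platform pick the sleep state via pci_choose_state().
 */
static int demo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}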
 935
 936#define PCI_EXP_SAVE_REGS	7
 937
 938static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
 939						       u16 cap, bool extended)
 940{
 941	struct pci_cap_saved_state *tmp;
 942
 943	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
 944		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
 945			return tmp;
 946	}
 947	return NULL;
 948}
 949
 950struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
 951{
 952	return _pci_find_saved_cap(dev, cap, false);
 953}
 954
 955struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
 956{
 957	return _pci_find_saved_cap(dev, cap, true);
 958}
 959
 960static int pci_save_pcie_state(struct pci_dev *dev)
 961{
 962	int i = 0;
 963	struct pci_cap_saved_state *save_state;
 964	u16 *cap;
 965
 966	if (!pci_is_pcie(dev))
 967		return 0;
 968
 969	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 970	if (!save_state) {
 971		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 972		return -ENOMEM;
 973	}
 974
 975	cap = (u16 *)&save_state->cap.data[0];
 976	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
 977	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
 978	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
 979	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
 980	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
 981	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
 982	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
 983
 984	return 0;
 985}
 986
 987static void pci_restore_pcie_state(struct pci_dev *dev)
 988{
 989	int i = 0;
 990	struct pci_cap_saved_state *save_state;
 991	u16 *cap;
 992
 993	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 994	if (!save_state)
 995		return;
 996
 997	cap = (u16 *)&save_state->cap.data[0];
 998	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
 999	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1000	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1001	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1002	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1003	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1004	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1005}
1006
1007
1008static int pci_save_pcix_state(struct pci_dev *dev)
1009{
1010	int pos;
1011	struct pci_cap_saved_state *save_state;
1012
1013	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1014	if (!pos)
1015		return 0;
1016
1017	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1018	if (!save_state) {
1019		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
1020		return -ENOMEM;
1021	}
1022
1023	pci_read_config_word(dev, pos + PCI_X_CMD,
1024			     (u16 *)save_state->cap.data);
1025
1026	return 0;
1027}
1028
1029static void pci_restore_pcix_state(struct pci_dev *dev)
1030{
1031	int i = 0, pos;
1032	struct pci_cap_saved_state *save_state;
1033	u16 *cap;
1034
1035	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1036	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1037	if (!save_state || !pos)
1038		return;
1039	cap = (u16 *)&save_state->cap.data[0];
1040
1041	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1042}
1043
1044
1045/**
1046 * pci_save_state - save the PCI configuration space of a device before suspending
1047 * @dev: PCI device that we're dealing with
1048 */
1049int pci_save_state(struct pci_dev *dev)
1050{
1051	int i;
1052	/* XXX: 100% dword access ok here? */
1053	for (i = 0; i < 16; i++)
1054		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1055	dev->state_saved = true;
1056
1057	i = pci_save_pcie_state(dev);
1058	if (i != 0)
1059		return i;
1060
1061	i = pci_save_pcix_state(dev);
1062	if (i != 0)
1063		return i;
1064
1065	return pci_save_vc_state(dev);
1066}
1067EXPORT_SYMBOL(pci_save_state);
1068
1069static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1070				     u32 saved_val, int retry)
1071{
1072	u32 val;
1073
1074	pci_read_config_dword(pdev, offset, &val);
1075	if (val == saved_val)
1076		return;
1077
1078	for (;;) {
1079		dev_dbg(&pdev->dev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1080			offset, val, saved_val);
1081		pci_write_config_dword(pdev, offset, saved_val);
1082		if (retry-- <= 0)
1083			return;
1084
1085		pci_read_config_dword(pdev, offset, &val);
1086		if (val == saved_val)
1087			return;
1088
1089		mdelay(1);
1090	}
1091}
1092
1093static void pci_restore_config_space_range(struct pci_dev *pdev,
1094					   int start, int end, int retry)
1095{
1096	int index;
1097
1098	for (index = end; index >= start; index--)
1099		pci_restore_config_dword(pdev, 4 * index,
1100					 pdev->saved_config_space[index],
1101					 retry);
1102}
1103
1104static void pci_restore_config_space(struct pci_dev *pdev)
1105{
1106	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1107		pci_restore_config_space_range(pdev, 10, 15, 0);
1108		/* Restore BARs before the command register. */
1109		pci_restore_config_space_range(pdev, 4, 9, 10);
1110		pci_restore_config_space_range(pdev, 0, 3, 0);
1111	} else {
1112		pci_restore_config_space_range(pdev, 0, 15, 0);
1113	}
1114}
1115
1116/**
1117 * pci_restore_state - Restore the saved state of a PCI device
1118 * @dev: PCI device that we're dealing with
1119 */
1120void pci_restore_state(struct pci_dev *dev)
1121{
1122	if (!dev->state_saved)
1123		return;
1124
1125	/* PCI Express register must be restored first */
1126	pci_restore_pcie_state(dev);
1127	pci_restore_ats_state(dev);
1128	pci_restore_vc_state(dev);
1129
1130	pci_cleanup_aer_error_status_regs(dev);
1131
1132	pci_restore_config_space(dev);
1133
1134	pci_restore_pcix_state(dev);
1135	pci_restore_msi_state(dev);
1136
1137	/* Restore ACS and IOV configuration state */
1138	pci_enable_acs(dev);
1139	pci_restore_iov_state(dev);
1140
1141	dev->state_saved = false;
1142}
1143EXPORT_SYMBOL(pci_restore_state);
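/*
 * Usage sketch (illustrative): the .resume() counterpart to the
 * demo_suspend() sequence sketched earlier.
 */
static int demo_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);	/* pairs with pci_save_state() */
	return pci_enable_device(pdev);
}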
1144
1145struct pci_saved_state {
1146	u32 config_space[16];
1147	struct pci_cap_saved_data cap[0];
1148};
1149
1150/**
1151 * pci_store_saved_state - Allocate and return an opaque struct containing
1152 *			   the device saved state.
1153 * @dev: PCI device that we're dealing with
1154 *
1155 * Return NULL if no state or error.
1156 */
1157struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1158{
1159	struct pci_saved_state *state;
1160	struct pci_cap_saved_state *tmp;
1161	struct pci_cap_saved_data *cap;
1162	size_t size;
1163
1164	if (!dev->state_saved)
1165		return NULL;
1166
1167	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1168
1169	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1170		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1171
1172	state = kzalloc(size, GFP_KERNEL);
1173	if (!state)
1174		return NULL;
1175
1176	memcpy(state->config_space, dev->saved_config_space,
1177	       sizeof(state->config_space));
1178
1179	cap = state->cap;
1180	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1181		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1182		memcpy(cap, &tmp->cap, len);
1183		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1184	}
1185	/* Empty cap_save terminates list */
1186
1187	return state;
1188}
1189EXPORT_SYMBOL_GPL(pci_store_saved_state);
1190
1191/**
1192 * pci_load_saved_state - Reload the provided saved state into struct pci_dev.
1193 * @dev: PCI device that we're dealing with
1194 * @state: Saved state returned from pci_store_saved_state()
1195 */
1196int pci_load_saved_state(struct pci_dev *dev,
1197			 struct pci_saved_state *state)
1198{
1199	struct pci_cap_saved_data *cap;
1200
1201	dev->state_saved = false;
1202
1203	if (!state)
1204		return 0;
1205
1206	memcpy(dev->saved_config_space, state->config_space,
1207	       sizeof(state->config_space));
1208
1209	cap = state->cap;
1210	while (cap->size) {
1211		struct pci_cap_saved_state *tmp;
1212
1213		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1214		if (!tmp || tmp->cap.size != cap->size)
1215			return -EINVAL;
1216
1217		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1218		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1219		       sizeof(struct pci_cap_saved_data) + cap->size);
1220	}
1221
1222	dev->state_saved = true;
1223	return 0;
1224}
1225EXPORT_SYMBOL_GPL(pci_load_saved_state);
1226
1227/**
1228 * pci_load_and_free_saved_state - Reload the saved state pointed to by state,
1229 *				   and free the memory allocated for it.
1230 * @dev: PCI device that we're dealing with
1231 * @state: Pointer to saved state returned from pci_store_saved_state()
1232 */
1233int pci_load_and_free_saved_state(struct pci_dev *dev,
1234				  struct pci_saved_state **state)
1235{
1236	int ret = pci_load_saved_state(dev, *state);
1237	kfree(*state);
1238	*state = NULL;
1239	return ret;
1240}
1241EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
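/*
 * Usage sketch (illustrative): snapshotting config space around a
 * reset, in the style of callers such as VFIO.
 */
static void demo_reset_with_snapshot(struct pci_dev *pdev)
{
	struct pci_saved_state *saved;

	pci_save_state(pdev);
	saved = pci_store_saved_state(pdev);
	/* ... issue a function or slot reset here ... */
	pci_load_and_free_saved_state(pdev, &saved);
	pci_restore_state(pdev);
}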
1242
1243int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1244{
1245	return pci_enable_resources(dev, bars);
1246}
1247
1248static int do_pci_enable_device(struct pci_dev *dev, int bars)
1249{
1250	int err;
1251	struct pci_dev *bridge;
1252	u16 cmd;
1253	u8 pin;
1254
1255	err = pci_set_power_state(dev, PCI_D0);
1256	if (err < 0 && err != -EIO)
1257		return err;
1258
1259	bridge = pci_upstream_bridge(dev);
1260	if (bridge)
1261		pcie_aspm_powersave_config_link(bridge);
1262
1263	err = pcibios_enable_device(dev, bars);
1264	if (err < 0)
1265		return err;
1266	pci_fixup_device(pci_fixup_enable, dev);
1267
1268	if (dev->msi_enabled || dev->msix_enabled)
1269		return 0;
1270
1271	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1272	if (pin) {
1273		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1274		if (cmd & PCI_COMMAND_INTX_DISABLE)
1275			pci_write_config_word(dev, PCI_COMMAND,
1276					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1277	}
1278
1279	return 0;
1280}
1281
1282/**
1283 * pci_reenable_device - Resume abandoned device
1284 * @dev: PCI device to be resumed
1285 *
1286 *  Note this function is a backend of pci_default_resume and is not supposed
1287 *  to be called by normal code; write a proper resume handler and use that instead.
1288 */
1289int pci_reenable_device(struct pci_dev *dev)
1290{
1291	if (pci_is_enabled(dev))
1292		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1293	return 0;
1294}
1295EXPORT_SYMBOL(pci_reenable_device);
1296
1297static void pci_enable_bridge(struct pci_dev *dev)
1298{
1299	struct pci_dev *bridge;
1300	int retval;
1301
1302	bridge = pci_upstream_bridge(dev);
1303	if (bridge)
1304		pci_enable_bridge(bridge);
1305
1306	if (pci_is_enabled(dev)) {
1307		if (!dev->is_busmaster)
1308			pci_set_master(dev);
1309		return;
1310	}
1311
1312	retval = pci_enable_device(dev);
1313	if (retval)
1314		dev_err(&dev->dev, "Error enabling bridge (%d), continuing\n",
1315			retval);
1316	pci_set_master(dev);
1317}
1318
1319static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1320{
1321	struct pci_dev *bridge;
1322	int err;
1323	int i, bars = 0;
1324
1325	/*
1326	 * Power state could be unknown at this point, either due to a fresh
1327	 * boot or a device removal call.  So get the current power state
1328	 * so that things like MSI message writing will behave as expected
1329	 * (e.g. if the device really is in D0 at enable time).
1330	 */
1331	if (dev->pm_cap) {
1332		u16 pmcsr;
1333		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1334		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1335	}
1336
1337	if (atomic_inc_return(&dev->enable_cnt) > 1)
1338		return 0;		/* already enabled */
1339
1340	bridge = pci_upstream_bridge(dev);
1341	if (bridge)
1342		pci_enable_bridge(bridge);
1343
1344	/* only skip the SR-IOV resource range */
1345	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1346		if (dev->resource[i].flags & flags)
1347			bars |= (1 << i);
1348	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1349		if (dev->resource[i].flags & flags)
1350			bars |= (1 << i);
1351
1352	err = do_pci_enable_device(dev, bars);
1353	if (err < 0)
1354		atomic_dec(&dev->enable_cnt);
1355	return err;
1356}
1357
1358/**
1359 * pci_enable_device_io - Initialize a device for use with IO space
1360 * @dev: PCI device to be initialized
1361 *
1362 *  Initialize device before it's used by a driver. Ask low-level code
1363 *  to enable I/O resources. Wake up the device if it was suspended.
1364 *  Beware, this function can fail.
1365 */
1366int pci_enable_device_io(struct pci_dev *dev)
1367{
1368	return pci_enable_device_flags(dev, IORESOURCE_IO);
1369}
1370EXPORT_SYMBOL(pci_enable_device_io);
1371
1372/**
1373 * pci_enable_device_mem - Initialize a device for use with Memory space
1374 * @dev: PCI device to be initialized
1375 *
1376 *  Initialize device before it's used by a driver. Ask low-level code
1377 *  to enable Memory resources. Wake up the device if it was suspended.
1378 *  Beware, this function can fail.
1379 */
1380int pci_enable_device_mem(struct pci_dev *dev)
1381{
1382	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1383}
1384EXPORT_SYMBOL(pci_enable_device_mem);
1385
1386/**
1387 * pci_enable_device - Initialize device before it's used by a driver.
1388 * @dev: PCI device to be initialized
1389 *
1390 *  Initialize device before it's used by a driver. Ask low-level code
1391 *  to enable I/O and memory. Wake up the device if it was suspended.
1392 *  Beware, this function can fail.
1393 *
1394 *  Note we don't actually enable the device many times if we call
1395 *  this function repeatedly (we just increment the count).
1396 */
1397int pci_enable_device(struct pci_dev *dev)
1398{
1399	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1400}
1401EXPORT_SYMBOL(pci_enable_device);
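/*
 * Usage sketch (illustrative): the unmanaged enable/request pairing in
 * probe(), balanced in remove(); "demo" is an assumed driver name.
 */
static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	err = pci_request_regions(pdev, "demo");
	if (err)
		pci_disable_device(pdev);
	return err;
}

static void demo_remove(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
	pci_disable_device(pdev);	/* balances pci_enable_device() */
}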
1402
1403/*
1404 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1405 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1406 * there's no need to track it separately.  pci_devres is initialized
1407 * when a device is enabled using managed PCI device enable interface.
1408 */
1409struct pci_devres {
1410	unsigned int enabled:1;
1411	unsigned int pinned:1;
1412	unsigned int orig_intx:1;
1413	unsigned int restore_intx:1;
1414	u32 region_mask;
1415};
1416
1417static void pcim_release(struct device *gendev, void *res)
1418{
1419	struct pci_dev *dev = to_pci_dev(gendev);
1420	struct pci_devres *this = res;
1421	int i;
1422
1423	if (dev->msi_enabled)
1424		pci_disable_msi(dev);
1425	if (dev->msix_enabled)
1426		pci_disable_msix(dev);
1427
1428	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1429		if (this->region_mask & (1 << i))
1430			pci_release_region(dev, i);
1431
1432	if (this->restore_intx)
1433		pci_intx(dev, this->orig_intx);
1434
1435	if (this->enabled && !this->pinned)
1436		pci_disable_device(dev);
1437}
1438
1439static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1440{
1441	struct pci_devres *dr, *new_dr;
1442
1443	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1444	if (dr)
1445		return dr;
1446
1447	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1448	if (!new_dr)
1449		return NULL;
1450	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1451}
1452
1453static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1454{
1455	if (pci_is_managed(pdev))
1456		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1457	return NULL;
1458}
1459
1460/**
1461 * pcim_enable_device - Managed pci_enable_device()
1462 * @pdev: PCI device to be initialized
1463 *
1464 * Managed pci_enable_device().
1465 */
1466int pcim_enable_device(struct pci_dev *pdev)
1467{
1468	struct pci_devres *dr;
1469	int rc;
1470
1471	dr = get_pci_dr(pdev);
1472	if (unlikely(!dr))
1473		return -ENOMEM;
1474	if (dr->enabled)
1475		return 0;
1476
1477	rc = pci_enable_device(pdev);
1478	if (!rc) {
1479		pdev->is_managed = 1;
1480		dr->enabled = 1;
1481	}
1482	return rc;
1483}
1484EXPORT_SYMBOL(pcim_enable_device);
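/*
 * Usage sketch (illustrative): the devres-managed variant; no explicit
 * disable is needed because pcim_release() runs on driver detach.
 */
static int demo_managed_probe(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;
	/* pcim_pin_device(pdev) here would keep it enabled after detach */
	return 0;
}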
1485
1486/**
1487 * pcim_pin_device - Pin managed PCI device
1488 * @pdev: PCI device to pin
1489 *
1490 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1491 * driver detach.  @pdev must have been enabled with
1492 * pcim_enable_device().
1493 */
1494void pcim_pin_device(struct pci_dev *pdev)
1495{
1496	struct pci_devres *dr;
1497
1498	dr = find_pci_dr(pdev);
1499	WARN_ON(!dr || !dr->enabled);
1500	if (dr)
1501		dr->pinned = 1;
1502}
1503EXPORT_SYMBOL(pcim_pin_device);
1504
1505/**
1506 * pcibios_add_device - provide arch specific hooks when adding device dev
1507 * @dev: the PCI device being added
1508 *
1509 * Permits the platform to provide architecture specific functionality when
1510 * devices are added. This is the default implementation. Architecture
1511 * implementations can override this.
1512 */
1513int __weak pcibios_add_device(struct pci_dev *dev)
1514{
1515	return 0;
1516}
1517
1518/**
1519 * pcibios_release_device - provide arch specific hooks when releasing device dev
1520 * @dev: the PCI device being released
1521 *
1522 * Permits the platform to provide architecture specific functionality when
1523 * devices are released. This is the default implementation. Architecture
1524 * implementations can override this.
1525 */
1526void __weak pcibios_release_device(struct pci_dev *dev) {}
1527
1528/**
1529 * pcibios_disable_device - disable arch specific PCI resources for device dev
1530 * @dev: the PCI device to disable
1531 *
1532 * Disables architecture specific PCI resources for the device. This
1533 * is the default implementation. Architecture implementations can
1534 * override this.
1535 */
1536void __weak pcibios_disable_device(struct pci_dev *dev) {}
1537
1538/**
1539 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1540 * @irq: ISA IRQ to penalize
1541 * @active: IRQ active or not
1542 *
1543 * Permits the platform to provide architecture-specific functionality when
1544 * penalizing ISA IRQs. This is the default implementation. Architecture
1545 * implementations can override this.
1546 */
1547void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1548
1549static void do_pci_disable_device(struct pci_dev *dev)
1550{
1551	u16 pci_command;
1552
1553	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1554	if (pci_command & PCI_COMMAND_MASTER) {
1555		pci_command &= ~PCI_COMMAND_MASTER;
1556		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1557	}
1558
1559	pcibios_disable_device(dev);
1560}
1561
1562/**
1563 * pci_disable_enabled_device - Disable device without updating enable_cnt
1564 * @dev: PCI device to disable
1565 *
1566 * NOTE: This function is a backend of PCI power management routines and is
1567 * not supposed to be called by drivers.
1568 */
1569void pci_disable_enabled_device(struct pci_dev *dev)
1570{
1571	if (pci_is_enabled(dev))
1572		do_pci_disable_device(dev);
1573}
1574
1575/**
1576 * pci_disable_device - Disable PCI device after use
1577 * @dev: PCI device to be disabled
1578 *
1579 * Signal to the system that the PCI device is not in use by the system
1580 * anymore.  This only involves disabling PCI bus-mastering, if active.
1581 *
1582 * Note we don't actually disable the device until all callers of
1583 * pci_enable_device() have called pci_disable_device().
1584 */
1585void pci_disable_device(struct pci_dev *dev)
1586{
1587	struct pci_devres *dr;
1588
1589	dr = find_pci_dr(dev);
1590	if (dr)
1591		dr->enabled = 0;
1592
1593	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1594		      "disabling already-disabled device");
1595
1596	if (atomic_dec_return(&dev->enable_cnt) != 0)
1597		return;
1598
1599	do_pci_disable_device(dev);
1600
1601	dev->is_busmaster = 0;
1602}
1603EXPORT_SYMBOL(pci_disable_device);
1604
1605/**
1606 * pcibios_set_pcie_reset_state - set reset state for device dev
1607 * @dev: the PCIe device to reset
1608 * @state: Reset state to enter
1609 *
1610 *
1611 * Sets the PCIe reset state for the device. This is the default
1612 * implementation. Architecture implementations can override this.
1613 */
1614int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1615					enum pcie_reset_state state)
1616{
1617	return -EINVAL;
1618}
1619
1620/**
1621 * pci_set_pcie_reset_state - set reset state for device dev
1622 * @dev: the PCIe device to reset
1623 * @state: Reset state to enter
1624 *
1625 *
1626 * Sets the PCI reset state for the device.
1627 */
1628int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1629{
1630	return pcibios_set_pcie_reset_state(dev, state);
1631}
1632EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1633
1634/**
1635 * pci_check_pme_status - Check if given device has generated PME.
1636 * @dev: Device to check.
1637 *
1638 * Check the PME status of the device and if set, clear it and clear PME enable
1639 * (if set).  Return 'true' if PME status and PME enable were both set or
1640 * 'false' otherwise.
1641 */
1642bool pci_check_pme_status(struct pci_dev *dev)
1643{
1644	int pmcsr_pos;
1645	u16 pmcsr;
1646	bool ret = false;
1647
1648	if (!dev->pm_cap)
1649		return false;
1650
1651	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1652	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1653	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1654		return false;
1655
1656	/* Clear PME status. */
1657	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1658	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1659		/* Disable PME to avoid interrupt flood. */
1660		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1661		ret = true;
1662	}
1663
1664	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1665
1666	return ret;
1667}
1668
1669/**
1670 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1671 * @dev: Device to handle.
1672 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1673 *
1674 * Check if @dev has generated PME and queue a resume request for it in that
1675 * case.
1676 */
1677static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1678{
1679	if (pme_poll_reset && dev->pme_poll)
1680		dev->pme_poll = false;
1681
1682	if (pci_check_pme_status(dev)) {
1683		pci_wakeup_event(dev);
1684		pm_request_resume(&dev->dev);
1685	}
1686	return 0;
1687}
1688
1689/**
1690 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1691 * @bus: Top bus of the subtree to walk.
1692 */
1693void pci_pme_wakeup_bus(struct pci_bus *bus)
1694{
1695	if (bus)
1696		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1697}
1698
1699
1700/**
1701 * pci_pme_capable - check the capability of PCI device to generate PME#
1702 * @dev: PCI device to handle.
1703 * @state: PCI state from which device will issue PME#.
1704 */
1705bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1706{
1707	if (!dev->pm_cap)
1708		return false;
1709
1710	return !!(dev->pme_support & (1 << state));
1711}
1712EXPORT_SYMBOL(pci_pme_capable);
1713
1714static void pci_pme_list_scan(struct work_struct *work)
1715{
1716	struct pci_pme_device *pme_dev, *n;
1717
1718	mutex_lock(&pci_pme_list_mutex);
1719	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1720		if (pme_dev->dev->pme_poll) {
1721			struct pci_dev *bridge;
1722
1723			bridge = pme_dev->dev->bus->self;
1724			/*
1725			 * If bridge is in low power state, the
1726			 * configuration space of subordinate devices
1727			 * may not be accessible
1728			 */
1729			if (bridge && bridge->current_state != PCI_D0)
1730				continue;
1731			pci_pme_wakeup(pme_dev->dev, NULL);
1732		} else {
1733			list_del(&pme_dev->list);
1734			kfree(pme_dev);
1735		}
1736	}
1737	if (!list_empty(&pci_pme_list))
1738		schedule_delayed_work(&pci_pme_work,
1739				      msecs_to_jiffies(PME_TIMEOUT));
1740	mutex_unlock(&pci_pme_list_mutex);
1741}
1742
1743static void __pci_pme_active(struct pci_dev *dev, bool enable)
1744{
1745	u16 pmcsr;
1746
1747	if (!dev->pme_support)
1748		return;
1749
1750	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1751	/* Clear PME_Status by writing 1 to it and enable PME# */
1752	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1753	if (!enable)
1754		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1755
1756	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1757}
1758
1759/**
1760 * pci_pme_active - enable or disable PCI device's PME# function
1761 * @dev: PCI device to handle.
1762 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1763 *
1764 * The caller must verify that the device is capable of generating PME# before
1765 * calling this function with @enable equal to 'true'.
1766 */
1767void pci_pme_active(struct pci_dev *dev, bool enable)
1768{
1769	__pci_pme_active(dev, enable);
1770
1771	/*
1772	 * PCI (as opposed to PCIe) PME requires that the device have
1773	 * its PME# line hooked up correctly. Not all hardware vendors
1774	 * do this, so the PME never gets delivered and the device
1775	 * remains asleep. The easiest way around this is to
1776	 * periodically walk the list of suspended devices and check
1777	 * whether any have their PME flag set. The assumption is that
1778	 * we'll wake up often enough anyway that this won't be a huge
1779	 * hit, and the power savings from the devices will still be a
1780	 * win.
1781	 *
1782	 * Although PCIe uses in-band PME message instead of PME# line
1783	 * to report PME, PME does not work for some PCIe devices in
1784	 * reality.  For example, there are devices that set their PME
1785	 * status bits, but don't really bother to send a PME message;
1786	 * there are PCI Express Root Ports that don't bother to
1787	 * trigger interrupts when they receive PME messages from the
1788	 * devices below.  So PME poll is used for PCIe devices too.
1789	 */
1790
1791	if (dev->pme_poll) {
1792		struct pci_pme_device *pme_dev;
1793		if (enable) {
1794			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1795					  GFP_KERNEL);
1796			if (!pme_dev) {
1797				dev_warn(&dev->dev, "can't enable PME#\n");
1798				return;
1799			}
1800			pme_dev->dev = dev;
1801			mutex_lock(&pci_pme_list_mutex);
1802			list_add(&pme_dev->list, &pci_pme_list);
1803			if (list_is_singular(&pci_pme_list))
1804				schedule_delayed_work(&pci_pme_work,
1805						      msecs_to_jiffies(PME_TIMEOUT));
1806			mutex_unlock(&pci_pme_list_mutex);
1807		} else {
1808			mutex_lock(&pci_pme_list_mutex);
1809			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1810				if (pme_dev->dev == dev) {
1811					list_del(&pme_dev->list);
1812					kfree(pme_dev);
1813					break;
1814				}
1815			}
1816			mutex_unlock(&pci_pme_list_mutex);
1817		}
1818	}
1819
1820	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
1821}
1822EXPORT_SYMBOL(pci_pme_active);
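/*
 * Usage sketch (illustrative): arming PME# before entering D3hot, after
 * the capability check the comment above requires.
 */
static void demo_arm_pme(struct pci_dev *pdev)
{
	if (pci_pme_capable(pdev, PCI_D3hot))
		pci_pme_active(pdev, true);
}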
1823
1824/**
1825 * __pci_enable_wake - enable PCI device as wakeup event source
1826 * @dev: PCI device affected
1827 * @state: PCI state from which device will issue wakeup events
1828 * @runtime: True if the events are to be generated at run time
1829 * @enable: True to enable event generation; false to disable
1830 *
1831 * This enables the device as a wakeup event source, or disables it.
1832 * When such events involve platform-specific hooks, those hooks are
1833 * called automatically by this routine.
1834 *
1835 * Devices with legacy power management (no standard PCI PM capabilities)
1836 * always require such platform hooks.
1837 *
1838 * RETURN VALUE:
1839 * 0 is returned on success
1840 * -EINVAL is returned if device is not supposed to wake up the system
1841 * Error code depending on the platform is returned if both the platform and
1842 * the native mechanism fail to enable the generation of wake-up events
1843 */
1844int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1845		      bool runtime, bool enable)
1846{
1847	int ret = 0;
1848
1849	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1850		return -EINVAL;
1851
1852	/* Don't do the same thing twice in a row for one device. */
1853	if (!!enable == !!dev->wakeup_prepared)
1854		return 0;
1855
1856	/*
1857	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1858	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1859	 * enable.  To disable wake-up we call the platform first, for symmetry.
1860	 */
1861
1862	if (enable) {
1863		int error;
1864
1865		if (pci_pme_capable(dev, state))
1866			pci_pme_active(dev, true);
1867		else
1868			ret = 1;
1869		error = runtime ? platform_pci_run_wake(dev, true) :
1870					platform_pci_sleep_wake(dev, true);
1871		if (ret)
1872			ret = error;
1873		if (!ret)
1874			dev->wakeup_prepared = true;
1875	} else {
1876		if (runtime)
1877			platform_pci_run_wake(dev, false);
1878		else
1879			platform_pci_sleep_wake(dev, false);
1880		pci_pme_active(dev, false);
1881		dev->wakeup_prepared = false;
1882	}
1883
1884	return ret;
1885}
1886EXPORT_SYMBOL(__pci_enable_wake);
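
/*
 * Illustrative sketch (not part of this file): a legacy ->suspend()
 * callback arming wake-up through the pci_enable_wake() wrapper
 * around __pci_enable_wake().  The foo_suspend() name is
 * hypothetical.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_power_t target = pci_choose_state(pdev, mesg);

	pci_save_state(pdev);
	/* Arm wake-up only if user space has allowed it via sysfs. */
	pci_enable_wake(pdev, target, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, target);
}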
1887
1888/**
1889 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1890 * @dev: PCI device to prepare
1891 * @enable: True to enable wake-up event generation; false to disable
1892 *
1893 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1894 * and this function allows them to set that up cleanly - pci_enable_wake()
1895 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1896 * ordering constraints.
1897 *
1898 * This function only returns error code if the device is not capable of
1899 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1900 * enable wake-up power for it.
1901 */
1902int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1903{
1904	return pci_pme_capable(dev, PCI_D3cold) ?
1905			pci_enable_wake(dev, PCI_D3cold, enable) :
1906			pci_enable_wake(dev, PCI_D3hot, enable);
1907}
1908EXPORT_SYMBOL(pci_wake_from_d3);
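
/*
 * Illustrative sketch (not part of this file): a network driver
 * handling an ethtool Wake-on-LAN request would typically call
 * pci_wake_from_d3() rather than open-coding the D3hot/D3cold
 * choice.  foo_set_wol() is hypothetical.
 */
static void foo_set_wol(struct pci_dev *pdev, bool wol)
{
	device_set_wakeup_enable(&pdev->dev, wol);
	pci_wake_from_d3(pdev, wol);
}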
1909
1910/**
1911 * pci_target_state - find an appropriate low power state for a given PCI dev
1912 * @dev: PCI device
1913 *
1914 * Use underlying platform code to find a supported low power state for @dev.
1915 * If the platform can't manage @dev, return the deepest state from which it
1916 * can generate wake events, based on any available PME info.
1917 */
1918static pci_power_t pci_target_state(struct pci_dev *dev)
1919{
1920	pci_power_t target_state = PCI_D3hot;
1921
1922	if (platform_pci_power_manageable(dev)) {
1923		/*
1924		 * Call the platform to choose the target state of the device
1925		 * and enable wake-up from this state if supported.
1926		 */
1927		pci_power_t state = platform_pci_choose_state(dev);
1928
1929		switch (state) {
1930		case PCI_POWER_ERROR:
1931		case PCI_UNKNOWN:
1932			break;
1933		case PCI_D1:
1934		case PCI_D2:
1935			if (pci_no_d1d2(dev))
1936				break;
1937		default:
1938			target_state = state;
1939		}
1940	} else if (!dev->pm_cap) {
1941		target_state = PCI_D0;
1942	} else if (device_may_wakeup(&dev->dev)) {
1943		/*
1944		 * Find the deepest state from which the device can generate
1945		 * wake-up events, make it the target state and enable device
1946		 * to generate PME#.
1947		 */
1948		if (dev->pme_support) {
1949			while (target_state
1950			      && !(dev->pme_support & (1 << target_state)))
1951				target_state--;
1952		}
1953	}
1954
1955	return target_state;
1956}
1957
1958/**
1959 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1960 * @dev: Device to handle.
1961 *
1962 * Choose the power state appropriate for the device depending on whether
1963 * it can wake up the system and/or is power manageable by the platform
1964 * (PCI_D3hot is the default) and put the device into that state.
1965 */
1966int pci_prepare_to_sleep(struct pci_dev *dev)
1967{
1968	pci_power_t target_state = pci_target_state(dev);
1969	int error;
1970
1971	if (target_state == PCI_POWER_ERROR)
1972		return -EIO;
1973
1974	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1975
1976	error = pci_set_power_state(dev, target_state);
1977
1978	if (error)
1979		pci_enable_wake(dev, target_state, false);
1980
1981	return error;
1982}
1983EXPORT_SYMBOL(pci_prepare_to_sleep);
1984
1985/**
1986 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1987 * @dev: Device to handle.
1988 *
1989 * Disable device's system wake-up capability and put it into D0.
1990 */
1991int pci_back_from_sleep(struct pci_dev *dev)
1992{
1993	pci_enable_wake(dev, PCI_D0, false);
1994	return pci_set_power_state(dev, PCI_D0);
1995}
1996EXPORT_SYMBOL(pci_back_from_sleep);
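
/*
 * Illustrative sketch (not part of this file): drivers with no special
 * wake-up needs can simply pair the two helpers above in their legacy
 * suspend/resume callbacks.  The foo_pm_* names are hypothetical.
 */
static int foo_pm_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	return pci_prepare_to_sleep(pdev);	/* picks state, arms wake-up */
}

static int foo_pm_resume(struct pci_dev *pdev)
{
	return pci_back_from_sleep(pdev);	/* disarms wake-up, enters D0 */
}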
1997
1998/**
1999 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2000 * @dev: PCI device being suspended.
2001 *
2002 * Prepare @dev to generate wake-up events at run time and put it into a low
2003 * power state.
2004 */
2005int pci_finish_runtime_suspend(struct pci_dev *dev)
2006{
2007	pci_power_t target_state = pci_target_state(dev);
2008	int error;
2009
2010	if (target_state == PCI_POWER_ERROR)
2011		return -EIO;
2012
2013	dev->runtime_d3cold = target_state == PCI_D3cold;
2014
2015	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
2016
2017	error = pci_set_power_state(dev, target_state);
2018
2019	if (error) {
2020		__pci_enable_wake(dev, target_state, true, false);
2021		dev->runtime_d3cold = false;
2022	}
2023
2024	return error;
2025}
2026
2027/**
2028 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2029 * @dev: Device to check.
2030 *
2031 * Return true if the device itself is capable of generating wake-up events
2032 * (through the platform or using the native PCIe PME) or if the device supports
2033 * PME and one of its upstream bridges can generate wake-up events.
2034 */
2035bool pci_dev_run_wake(struct pci_dev *dev)
2036{
2037	struct pci_bus *bus = dev->bus;
2038
2039	if (device_run_wake(&dev->dev))
2040		return true;
2041
2042	if (!dev->pme_support)
2043		return false;
2044
2045	while (bus->parent) {
2046		struct pci_dev *bridge = bus->self;
2047
2048		if (device_run_wake(&bridge->dev))
2049			return true;
2050
2051		bus = bus->parent;
2052	}
2053
2054	/* We have reached the root bus. */
2055	if (bus->bridge)
2056		return device_run_wake(bus->bridge);
2057
2058	return false;
2059}
2060EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2061
2062/**
2063 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2064 * @pci_dev: Device to check.
2065 *
2066 * Return 'true' if the device is runtime-suspended, does not have to be
2067 * reconfigured due to wakeup-setting differences between system and runtime
2068 * suspend, and its current power state is suitable for the upcoming
2069 * (system) transition.
2070 *
2071 * If the device is not configured for system wakeup, disable PME for it before
2072 * returning 'true' to prevent it from waking up the system unnecessarily.
2073 */
2074bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2075{
2076	struct device *dev = &pci_dev->dev;
2077
2078	if (!pm_runtime_suspended(dev)
2079	    || pci_target_state(pci_dev) != pci_dev->current_state
2080	    || platform_pci_need_resume(pci_dev))
2081		return false;
2082
2083	/*
2084	 * At this point the device is good to go unless it's been configured
2085	 * to generate PME at runtime suspend time, but it is not supposed
2086	 * to wake up the system.  In that case, simply disable PME for it
2087	 * (it will have to be re-enabled on exit from system resume).
2088	 *
2089	 * If the device's power state is D3cold and the platform check above
2090	 * hasn't triggered, the device's configuration is suitable and we don't
2091	 * need to manipulate it at all.
2092	 */
2093	spin_lock_irq(&dev->power.lock);
2094
2095	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2096	    !device_may_wakeup(dev))
2097		__pci_pme_active(pci_dev, false);
2098
2099	spin_unlock_irq(&dev->power.lock);
2100	return true;
2101}
2102
2103/**
2104 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2105 * @pci_dev: Device to handle.
2106 *
2107 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2108 * it might have been disabled during the prepare phase of system suspend if
2109 * the device was not configured for system wakeup.
2110 */
2111void pci_dev_complete_resume(struct pci_dev *pci_dev)
2112{
2113	struct device *dev = &pci_dev->dev;
2114
2115	if (!pci_dev_run_wake(pci_dev))
2116		return;
2117
2118	spin_lock_irq(&dev->power.lock);
2119
2120	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2121		__pci_pme_active(pci_dev, true);
2122
2123	spin_unlock_irq(&dev->power.lock);
2124}
2125
2126void pci_config_pm_runtime_get(struct pci_dev *pdev)
2127{
2128	struct device *dev = &pdev->dev;
2129	struct device *parent = dev->parent;
2130
2131	if (parent)
2132		pm_runtime_get_sync(parent);
2133	pm_runtime_get_noresume(dev);
2134	/*
2135	 * pdev->current_state is set to PCI_D3cold during suspending,
2136	 * so wait until suspending completes
2137	 */
2138	pm_runtime_barrier(dev);
2139	/*
2140	 * Only need to resume devices in D3cold, because config
2141	 * registers are still accessible for devices suspended but
2142	 * not in D3cold.
2143	 */
2144	if (pdev->current_state == PCI_D3cold)
2145		pm_runtime_resume(dev);
2146}
2147
2148void pci_config_pm_runtime_put(struct pci_dev *pdev)
2149{
2150	struct device *dev = &pdev->dev;
2151	struct device *parent = dev->parent;
2152
2153	pm_runtime_put(dev);
2154	if (parent)
2155		pm_runtime_put_sync(parent);
2156}
2157
2158/**
2159 * pci_pm_init - Initialize PM functions of given PCI device
2160 * @dev: PCI device to handle.
2161 */
2162void pci_pm_init(struct pci_dev *dev)
2163{
2164	int pm;
2165	u16 pmc;
2166
2167	pm_runtime_forbid(&dev->dev);
2168	pm_runtime_set_active(&dev->dev);
2169	pm_runtime_enable(&dev->dev);
2170	device_enable_async_suspend(&dev->dev);
2171	dev->wakeup_prepared = false;
2172
2173	dev->pm_cap = 0;
2174	dev->pme_support = 0;
2175
2176	/* find PCI PM capability in list */
2177	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2178	if (!pm)
2179		return;
2180	/* Check device's ability to generate PME# */
2181	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2182
2183	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2184		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
2185			pmc & PCI_PM_CAP_VER_MASK);
2186		return;
2187	}
2188
2189	dev->pm_cap = pm;
2190	dev->d3_delay = PCI_PM_D3_WAIT;
2191	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2192	dev->d3cold_allowed = true;
2193
2194	dev->d1_support = false;
2195	dev->d2_support = false;
2196	if (!pci_no_d1d2(dev)) {
2197		if (pmc & PCI_PM_CAP_D1)
2198			dev->d1_support = true;
2199		if (pmc & PCI_PM_CAP_D2)
2200			dev->d2_support = true;
2201
2202		if (dev->d1_support || dev->d2_support)
2203			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
2204				   dev->d1_support ? " D1" : "",
2205				   dev->d2_support ? " D2" : "");
2206	}
2207
2208	pmc &= PCI_PM_CAP_PME_MASK;
2209	if (pmc) {
2210		dev_printk(KERN_DEBUG, &dev->dev,
2211			 "PME# supported from%s%s%s%s%s\n",
2212			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2213			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2214			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2215			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2216			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2217		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2218		dev->pme_poll = true;
2219		/*
2220		 * Make device's PM flags reflect the wake-up capability, but
2221		 * let the user space enable it to wake up the system as needed.
2222		 */
2223		device_set_wakeup_capable(&dev->dev, true);
2224		/* Disable the PME# generation functionality */
2225		pci_pme_active(dev, false);
2226	}
2227}
2228
2229static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2230{
2231	unsigned long flags = IORESOURCE_PCI_FIXED;
2232
2233	switch (prop) {
2234	case PCI_EA_P_MEM:
2235	case PCI_EA_P_VF_MEM:
2236		flags |= IORESOURCE_MEM;
2237		break;
2238	case PCI_EA_P_MEM_PREFETCH:
2239	case PCI_EA_P_VF_MEM_PREFETCH:
2240		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2241		break;
2242	case PCI_EA_P_IO:
2243		flags |= IORESOURCE_IO;
2244		break;
2245	default:
2246		return 0;
2247	}
2248
2249	return flags;
2250}
2251
2252static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2253					    u8 prop)
2254{
2255	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2256		return &dev->resource[bei];
2257#ifdef CONFIG_PCI_IOV
2258	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2259		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2260		return &dev->resource[PCI_IOV_RESOURCES +
2261				      bei - PCI_EA_BEI_VF_BAR0];
2262#endif
2263	else if (bei == PCI_EA_BEI_ROM)
2264		return &dev->resource[PCI_ROM_RESOURCE];
2265	else
2266		return NULL;
2267}
2268
2269/* Read an Enhanced Allocation (EA) entry */
2270static int pci_ea_read(struct pci_dev *dev, int offset)
2271{
2272	struct resource *res;
2273	int ent_size, ent_offset = offset;
2274	resource_size_t start, end;
2275	unsigned long flags;
2276	u32 dw0, bei, base, max_offset;
2277	u8 prop;
2278	bool support_64 = (sizeof(resource_size_t) >= 8);
2279
2280	pci_read_config_dword(dev, ent_offset, &dw0);
2281	ent_offset += 4;
2282
2283	/* Entry size field indicates DWORDs after 1st */
2284	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2285
2286	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2287		goto out;
2288
2289	bei = (dw0 & PCI_EA_BEI) >> 4;
2290	prop = (dw0 & PCI_EA_PP) >> 8;
2291
2292	/*
2293	 * If the Property is in the reserved range, try the Secondary
2294	 * Property instead.
2295	 */
2296	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2297		prop = (dw0 & PCI_EA_SP) >> 16;
2298	if (prop > PCI_EA_P_BRIDGE_IO)
2299		goto out;
2300
2301	res = pci_ea_get_resource(dev, bei, prop);
2302	if (!res) {
2303		dev_err(&dev->dev, "Unsupported EA entry BEI: %u\n", bei);
2304		goto out;
2305	}
2306
2307	flags = pci_ea_flags(dev, prop);
2308	if (!flags) {
2309		dev_err(&dev->dev, "Unsupported EA properties: %#x\n", prop);
2310		goto out;
2311	}
2312
2313	/* Read Base */
2314	pci_read_config_dword(dev, ent_offset, &base);
2315	start = (base & PCI_EA_FIELD_MASK);
2316	ent_offset += 4;
2317
2318	/* Read MaxOffset */
2319	pci_read_config_dword(dev, ent_offset, &max_offset);
2320	ent_offset += 4;
2321
2322	/* Read Base MSBs (if 64-bit entry) */
2323	if (base & PCI_EA_IS_64) {
2324		u32 base_upper;
2325
2326		pci_read_config_dword(dev, ent_offset, &base_upper);
2327		ent_offset += 4;
2328
2329		flags |= IORESOURCE_MEM_64;
2330
2331		/* entry starts above 32-bit boundary, can't use */
2332		if (!support_64 && base_upper)
2333			goto out;
2334
2335		if (support_64)
2336			start |= ((u64)base_upper << 32);
2337	}
2338
2339	end = start + (max_offset | 0x03);
2340
2341	/* Read MaxOffset MSBs (if 64-bit entry) */
2342	if (max_offset & PCI_EA_IS_64) {
2343		u32 max_offset_upper;
2344
2345		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2346		ent_offset += 4;
2347
2348		flags |= IORESOURCE_MEM_64;
2349
2350		/* entry too big, can't use */
2351		if (!support_64 && max_offset_upper)
2352			goto out;
2353
2354		if (support_64)
2355			end += ((u64)max_offset_upper << 32);
2356	}
2357
2358	if (end < start) {
2359		dev_err(&dev->dev, "EA Entry crosses address boundary\n");
2360		goto out;
2361	}
2362
2363	if (ent_size != ent_offset - offset) {
2364		dev_err(&dev->dev,
2365			"EA Entry Size (%d) does not match length read (%d)\n",
2366			ent_size, ent_offset - offset);
2367		goto out;
2368	}
2369
2370	res->name = pci_name(dev);
2371	res->start = start;
2372	res->end = end;
2373	res->flags = flags;
2374
2375	if (bei <= PCI_EA_BEI_BAR5)
2376		dev_printk(KERN_DEBUG, &dev->dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2377			   bei, res, prop);
2378	else if (bei == PCI_EA_BEI_ROM)
2379		dev_printk(KERN_DEBUG, &dev->dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2380			   res, prop);
2381	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2382		dev_printk(KERN_DEBUG, &dev->dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2383			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
2384	else
2385		dev_printk(KERN_DEBUG, &dev->dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2386			   bei, res, prop);
2387
2388out:
2389	return offset + ent_size;
2390}
2391
2392/* Enhanced Allocation Initialization */
2393void pci_ea_init(struct pci_dev *dev)
2394{
2395	int ea;
2396	u8 num_ent;
2397	int offset;
2398	int i;
2399
2400	/* find PCI EA capability in list */
2401	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2402	if (!ea)
2403		return;
2404
2405	/* determine the number of entries */
2406	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2407					&num_ent);
2408	num_ent &= PCI_EA_NUM_ENT_MASK;
2409
2410	offset = ea + PCI_EA_FIRST_ENT;
2411
2412	/* Skip DWORD 2 for type 1 functions */
2413	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2414		offset += 4;
2415
2416	/* parse each EA entry */
2417	for (i = 0; i < num_ent; ++i)
2418		offset = pci_ea_read(dev, offset);
2419}
2420
2421static void pci_add_saved_cap(struct pci_dev *pci_dev,
2422	struct pci_cap_saved_state *new_cap)
2423{
2424	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2425}
2426
2427/**
2428 * _pci_add_cap_save_buffer - allocate buffer for saving given
2429 *                            capability registers
2430 * @dev: the PCI device
2431 * @cap: the capability to allocate the buffer for
2432 * @extended: Standard or Extended capability ID
2433 * @size: requested size of the buffer
2434 */
2435static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2436				    bool extended, unsigned int size)
2437{
2438	int pos;
2439	struct pci_cap_saved_state *save_state;
2440
2441	if (extended)
2442		pos = pci_find_ext_capability(dev, cap);
2443	else
2444		pos = pci_find_capability(dev, cap);
2445
2446	if (!pos)
2447		return 0;
2448
2449	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2450	if (!save_state)
2451		return -ENOMEM;
2452
2453	save_state->cap.cap_nr = cap;
2454	save_state->cap.cap_extended = extended;
2455	save_state->cap.size = size;
2456	pci_add_saved_cap(dev, save_state);
2457
2458	return 0;
2459}
2460
2461int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2462{
2463	return _pci_add_cap_save_buffer(dev, cap, false, size);
2464}
2465
2466int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2467{
2468	return _pci_add_cap_save_buffer(dev, cap, true, size);
2469}
2470
2471/**
2472 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2473 * @dev: the PCI device
2474 */
2475void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2476{
2477	int error;
2478
2479	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2480					PCI_EXP_SAVE_REGS * sizeof(u16));
2481	if (error)
2482		dev_err(&dev->dev,
2483			"unable to preallocate PCI Express save buffer\n");
2484
2485	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2486	if (error)
2487		dev_err(&dev->dev,
2488			"unable to preallocate PCI-X save buffer\n");
2489
2490	pci_allocate_vc_save_buffers(dev);
2491}
2492
2493void pci_free_cap_save_buffers(struct pci_dev *dev)
2494{
2495	struct pci_cap_saved_state *tmp;
2496	struct hlist_node *n;
2497
2498	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2499		kfree(tmp);
2500}
2501
2502/**
2503 * pci_configure_ari - enable or disable ARI forwarding
2504 * @dev: the PCI device
2505 *
2506 * If @dev and its upstream bridge both support ARI, enable ARI in the
2507 * bridge.  Otherwise, disable ARI in the bridge.
2508 */
2509void pci_configure_ari(struct pci_dev *dev)
2510{
2511	u32 cap;
2512	struct pci_dev *bridge;
2513
2514	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2515		return;
2516
2517	bridge = dev->bus->self;
2518	if (!bridge)
2519		return;
2520
2521	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2522	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2523		return;
2524
2525	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2526		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2527					 PCI_EXP_DEVCTL2_ARI);
2528		bridge->ari_enabled = 1;
2529	} else {
2530		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2531					   PCI_EXP_DEVCTL2_ARI);
2532		bridge->ari_enabled = 0;
2533	}
2534}
2535
2536static int pci_acs_enable;
2537
2538/**
2539 * pci_request_acs - ask for ACS to be enabled if supported
2540 */
2541void pci_request_acs(void)
2542{
2543	pci_acs_enable = 1;
2544}
2545
2546/**
2547 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
2548 * @dev: the PCI device
2549 */
2550static int pci_std_enable_acs(struct pci_dev *dev)
2551{
2552	int pos;
2553	u16 cap;
2554	u16 ctrl;
2555
2556	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2557	if (!pos)
2558		return -ENODEV;
2559
2560	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2561	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2562
2563	/* Source Validation */
2564	ctrl |= (cap & PCI_ACS_SV);
2565
2566	/* P2P Request Redirect */
2567	ctrl |= (cap & PCI_ACS_RR);
2568
2569	/* P2P Completion Redirect */
2570	ctrl |= (cap & PCI_ACS_CR);
2571
2572	/* Upstream Forwarding */
2573	ctrl |= (cap & PCI_ACS_UF);
2574
2575	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2576
2577	return 0;
2578}
2579
2580/**
2581 * pci_enable_acs - enable ACS if the hardware supports it
2582 * @dev: the PCI device
2583 */
2584void pci_enable_acs(struct pci_dev *dev)
2585{
2586	if (!pci_acs_enable)
2587		return;
2588
2589	if (!pci_std_enable_acs(dev))
2590		return;
2591
2592	pci_dev_specific_enable_acs(dev);
2593}
2594
2595static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2596{
2597	int pos;
2598	u16 cap, ctrl;
2599
2600	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2601	if (!pos)
2602		return false;
2603
2604	/*
2605	 * Except for egress control, capabilities are either required
2606	 * or only required if controllable.  Features missing from the
2607	 * capability field can therefore be assumed to be hard-wired enabled.
2608	 */
2609	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2610	acs_flags &= (cap | PCI_ACS_EC);
2611
2612	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2613	return (ctrl & acs_flags) == acs_flags;
2614}
2615
2616/**
2617 * pci_acs_enabled - test ACS against required flags for a given device
2618 * @pdev: device to test
2619 * @acs_flags: required PCI ACS flags
2620 *
2621 * Return true if the device supports the provided flags.  Automatically
2622 * filters out flags that are not implemented on multifunction devices.
2623 *
2624 * Note that this interface checks the effective ACS capabilities of the
2625 * device rather than the actual capabilities.  For instance, most single
2626 * function endpoints are not required to support ACS because they have no
2627 * opportunity for peer-to-peer access.  We therefore return 'true'
2628 * regardless of whether the device exposes an ACS capability.  This makes
2629 * it much easier for callers of this function to ignore the actual type
2630 * or topology of the device when testing ACS support.
2631 */
2632bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2633{
2634	int ret;
2635
2636	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2637	if (ret >= 0)
2638		return ret > 0;
2639
2640	/*
2641	 * Conventional PCI and PCI-X devices never support ACS, either
2642	 * effectively or actually.  The shared bus topology implies that
2643	 * any device on the bus can receive or snoop DMA.
2644	 */
2645	if (!pci_is_pcie(pdev))
2646		return false;
2647
2648	switch (pci_pcie_type(pdev)) {
2649	/*
2650	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2651	 * but since their primary interface is PCI/X, we conservatively
2652	 * handle them as we would a non-PCIe device.
2653	 */
2654	case PCI_EXP_TYPE_PCIE_BRIDGE:
2655	/*
2656	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
2657	 * applicable... must never implement an ACS Extended Capability...".
2658	 * This seems arbitrary, but we take a conservative interpretation
2659	 * of this statement.
2660	 */
2661	case PCI_EXP_TYPE_PCI_BRIDGE:
2662	case PCI_EXP_TYPE_RC_EC:
2663		return false;
2664	/*
2665	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2666	 * implement ACS in order to indicate their peer-to-peer capabilities,
2667	 * regardless of whether they are single- or multi-function devices.
2668	 */
2669	case PCI_EXP_TYPE_DOWNSTREAM:
2670	case PCI_EXP_TYPE_ROOT_PORT:
2671		return pci_acs_flags_enabled(pdev, acs_flags);
2672	/*
2673	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2674	 * implemented by the remaining PCIe types to indicate peer-to-peer
2675	 * capabilities, but only when they are part of a multifunction
2676	 * device.  The footnote for section 6.12 indicates the specific
2677	 * PCIe types included here.
2678	 */
2679	case PCI_EXP_TYPE_ENDPOINT:
2680	case PCI_EXP_TYPE_UPSTREAM:
2681	case PCI_EXP_TYPE_LEG_END:
2682	case PCI_EXP_TYPE_RC_END:
2683		if (!pdev->multifunction)
2684			break;
2685
2686		return pci_acs_flags_enabled(pdev, acs_flags);
2687	}
2688
2689	/*
2690	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2691	 * to single function devices with the exception of downstream ports.
2692	 */
2693	return true;
2694}
2695
2696/**
2697 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2698 * @start: starting downstream device
2699 * @end: ending upstream device or NULL to search to the root bus
2700 * @acs_flags: required flags
2701 *
2702 * Walk up a device tree from start to end testing PCI ACS support.  If
2703 * any step along the way does not support the required flags, return false.
2704 */
2705bool pci_acs_path_enabled(struct pci_dev *start,
2706			  struct pci_dev *end, u16 acs_flags)
2707{
2708	struct pci_dev *pdev, *parent = start;
2709
2710	do {
2711		pdev = parent;
2712
2713		if (!pci_acs_enabled(pdev, acs_flags))
2714			return false;
2715
2716		if (pci_is_root_bus(pdev->bus))
2717			return (end == NULL);
2718
2719		parent = pdev->bus->self;
2720	} while (pdev != end);
2721
2722	return true;
2723}
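
/*
 * Illustrative sketch (not part of this file): built-in code such as
 * an IOMMU driver could use pci_acs_path_enabled() to decide whether
 * two functions are isolated from each other for grouping purposes.
 * foo_devices_isolated() is a hypothetical helper; the flag set below
 * is one plausible choice of redirect/validation bits, not mandated
 * by this file.
 */
static bool foo_devices_isolated(struct pci_dev *a, struct pci_dev *b)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	/* Both paths to the root must redirect/validate peer-to-peer. */
	return pci_acs_path_enabled(a, NULL, flags) &&
	       pci_acs_path_enabled(b, NULL, flags);
}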
2724
2725/**
2726 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2727 * @dev: the PCI device
2728 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2729 *
2730 * Perform INTx swizzling for a device behind one level of bridge.  This is
2731 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2732 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2733 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2734 * the PCI Express Base Specification, Revision 2.1)
2735 */
2736u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2737{
2738	int slot;
2739
2740	if (pci_ari_enabled(dev->bus))
2741		slot = 0;
2742	else
2743		slot = PCI_SLOT(dev->devfn);
2744
2745	return (((pin - 1) + slot) % 4) + 1;
2746}
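
/*
 * Worked example: a device in slot 3 behind one bridge that asserts
 * INTB (pin 2) appears on the bridge's primary side as
 * (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA.
 */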
2747
2748int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2749{
2750	u8 pin;
2751
2752	pin = dev->pin;
2753	if (!pin)
2754		return -1;
2755
2756	while (!pci_is_root_bus(dev->bus)) {
2757		pin = pci_swizzle_interrupt_pin(dev, pin);
2758		dev = dev->bus->self;
2759	}
2760	*bridge = dev;
2761	return pin;
2762}
2763
2764/**
2765 * pci_common_swizzle - swizzle INTx all the way to root bridge
2766 * @dev: the PCI device
2767 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2768 *
2769 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2770 * bridges all the way up to a PCI root bus.
2771 */
2772u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2773{
2774	u8 pin = *pinp;
2775
2776	while (!pci_is_root_bus(dev->bus)) {
2777		pin = pci_swizzle_interrupt_pin(dev, pin);
2778		dev = dev->bus->self;
2779	}
2780	*pinp = pin;
2781	return PCI_SLOT(dev->devfn);
2782}
2783EXPORT_SYMBOL_GPL(pci_common_swizzle);
2784
2785/**
2786 *	pci_release_region - Release a PCI BAR
2787 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2788 *	@bar: BAR to release
2789 *
2790 *	Releases the PCI I/O and memory resources previously reserved by a
2791 *	successful call to pci_request_region.  Call this function only
2792 *	after all use of the PCI regions has ceased.
2793 */
2794void pci_release_region(struct pci_dev *pdev, int bar)
2795{
2796	struct pci_devres *dr;
2797
2798	if (pci_resource_len(pdev, bar) == 0)
2799		return;
2800	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2801		release_region(pci_resource_start(pdev, bar),
2802				pci_resource_len(pdev, bar));
2803	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2804		release_mem_region(pci_resource_start(pdev, bar),
2805				pci_resource_len(pdev, bar));
2806
2807	dr = find_pci_dr(pdev);
2808	if (dr)
2809		dr->region_mask &= ~(1 << bar);
2810}
2811EXPORT_SYMBOL(pci_release_region);
2812
2813/**
2814 *	__pci_request_region - Reserve PCI I/O and memory resource
2815 *	@pdev: PCI device whose resources are to be reserved
2816 *	@bar: BAR to be reserved
2817 *	@res_name: Name to be associated with resource.
2818 *	@exclusive: whether the region access is exclusive or not
2819 *
2820 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2821 *	being reserved by owner @res_name.  Do not access any
2822 *	address inside the PCI regions unless this call returns
2823 *	successfully.
2824 *
2825 *	If @exclusive is set, then the region is marked so that userspace
2826 *	is explicitly not allowed to map the resource via /dev/mem or
2827 *	sysfs MMIO access.
2828 *
2829 *	Returns 0 on success, or %EBUSY on error.  A warning
2830 *	message is also printed on failure.
2831 */
2832static int __pci_request_region(struct pci_dev *pdev, int bar,
2833				const char *res_name, int exclusive)
2834{
2835	struct pci_devres *dr;
2836
2837	if (pci_resource_len(pdev, bar) == 0)
2838		return 0;
2839
2840	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2841		if (!request_region(pci_resource_start(pdev, bar),
2842			    pci_resource_len(pdev, bar), res_name))
2843			goto err_out;
2844	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2845		if (!__request_mem_region(pci_resource_start(pdev, bar),
2846					pci_resource_len(pdev, bar), res_name,
2847					exclusive))
2848			goto err_out;
2849	}
2850
2851	dr = find_pci_dr(pdev);
2852	if (dr)
2853		dr->region_mask |= 1 << bar;
2854
2855	return 0;
2856
2857err_out:
2858	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2859		 &pdev->resource[bar]);
2860	return -EBUSY;
2861}
2862
2863/**
2864 *	pci_request_region - Reserve PCI I/O and memory resource
2865 *	@pdev: PCI device whose resources are to be reserved
2866 *	@bar: BAR to be reserved
2867 *	@res_name: Name to be associated with resource
2868 *
2869 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2870 *	being reserved by owner @res_name.  Do not access any
2871 *	address inside the PCI regions unless this call returns
2872 *	successfully.
2873 *
2874 *	Returns 0 on success, or %EBUSY on error.  A warning
2875 *	message is also printed on failure.
2876 */
2877int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2878{
2879	return __pci_request_region(pdev, bar, res_name, 0);
2880}
2881EXPORT_SYMBOL(pci_request_region);
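
/*
 * Illustrative sketch (not part of this file): claiming and mapping a
 * single BAR from a probe path.  foo_map_bar() and the use of BAR 0
 * are hypothetical; pci_iomap() is the usual companion helper.
 */
static void __iomem *foo_map_bar(struct pci_dev *pdev)
{
	void __iomem *regs;

	if (pci_request_region(pdev, 0, "foo"))
		return NULL;

	regs = pci_iomap(pdev, 0, 0);	/* maxlen 0 = map the whole BAR */
	if (!regs)
		pci_release_region(pdev, 0);
	return regs;
}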
2882
2883/**
2884 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2885 *	@pdev: PCI device whose resources are to be reserved
2886 *	@bar: BAR to be reserved
2887 *	@res_name: Name to be associated with resource.
2888 *
2889 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2890 *	being reserved by owner @res_name.  Do not access any
2891 *	address inside the PCI regions unless this call returns
2892 *	successfully.
2893 *
2894 *	Returns 0 on success, or %EBUSY on error.  A warning
2895 *	message is also printed on failure.
2896 *
2897 *	The key difference that _exclusive makes is that userspace is
2898 *	explicitly not allowed to map the resource via /dev/mem or
2899 *	sysfs.
2900 */
2901int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
2902				 const char *res_name)
2903{
2904	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2905}
2906EXPORT_SYMBOL(pci_request_region_exclusive);
2907
2908/**
2909 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2910 * @pdev: PCI device whose resources were previously reserved
2911 * @bars: Bitmask of BARs to be released
2912 *
2913 * Release selected PCI I/O and memory resources previously reserved.
2914 * Call this function only after all use of the PCI regions has ceased.
2915 */
2916void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2917{
2918	int i;
2919
2920	for (i = 0; i < 6; i++)
2921		if (bars & (1 << i))
2922			pci_release_region(pdev, i);
2923}
2924EXPORT_SYMBOL(pci_release_selected_regions);
2925
2926static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2927					  const char *res_name, int excl)
2928{
2929	int i;
2930
2931	for (i = 0; i < 6; i++)
2932		if (bars & (1 << i))
2933			if (__pci_request_region(pdev, i, res_name, excl))
2934				goto err_out;
2935	return 0;
2936
2937err_out:
2938	while (--i >= 0)
2939		if (bars & (1 << i))
2940			pci_release_region(pdev, i);
2941
2942	return -EBUSY;
2943}
2944
2945
2946/**
2947 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2948 * @pdev: PCI device whose resources are to be reserved
2949 * @bars: Bitmask of BARs to be requested
2950 * @res_name: Name to be associated with resource
2951 */
2952int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2953				 const char *res_name)
2954{
2955	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2956}
2957EXPORT_SYMBOL(pci_request_selected_regions);
2958
2959int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
2960					   const char *res_name)
2961{
2962	return __pci_request_selected_regions(pdev, bars, res_name,
2963			IORESOURCE_EXCLUSIVE);
2964}
2965EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
2966
2967/**
2968 *	pci_release_regions - Release reserved PCI I/O and memory resources
2969 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2970 *
2971 *	Releases all PCI I/O and memory resources previously reserved by a
2972 *	successful call to pci_request_regions.  Call this function only
2973 *	after all use of the PCI regions has ceased.
2974 */
2975
2976void pci_release_regions(struct pci_dev *pdev)
2977{
2978	pci_release_selected_regions(pdev, (1 << 6) - 1);
2979}
2980EXPORT_SYMBOL(pci_release_regions);
2981
2982/**
2983 *	pci_request_regions - Reserve PCI I/O and memory resources
2984 *	@pdev: PCI device whose resources are to be reserved
2985 *	@res_name: Name to be associated with resource.
2986 *
2987 *	Mark all PCI regions associated with PCI device @pdev as
2988 *	being reserved by owner @res_name.  Do not access any
2989 *	address inside the PCI regions unless this call returns
2990 *	successfully.
2991 *
2992 *	Returns 0 on success, or %EBUSY on error.  A warning
2993 *	message is also printed on failure.
2994 */
2995int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2996{
2997	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2998}
2999EXPORT_SYMBOL(pci_request_regions);
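
/*
 * Illustrative sketch (not part of this file): the canonical probe
 * prologue enables the device and then claims all of its regions.
 * foo_probe() and the "foo" owner string are hypothetical.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "foo");
	if (rc)
		pci_disable_device(pdev);
	return rc;
}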
3000
3001/**
3002 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3003 *	@pdev: PCI device whose resources are to be reserved
3004 *	@res_name: Name to be associated with resource.
3005 *
3006 *	Mark all PCI regions associated with PCI device @pdev as
3007 *	being reserved by owner @res_name.  Do not access any
3008 *	address inside the PCI regions unless this call returns
3009 *	successfully.
3010 *
3011 *	pci_request_regions_exclusive() will mark the region so that
3012 *	/dev/mem and the sysfs MMIO access will not be allowed.
3013 *
3014 *	Returns 0 on success, or %EBUSY on error.  A warning
3015 *	message is also printed on failure.
3016 */
3017int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3018{
3019	return pci_request_selected_regions_exclusive(pdev,
3020					((1 << 6) - 1), res_name);
3021}
3022EXPORT_SYMBOL(pci_request_regions_exclusive);
3023
3024/**
3025 *	pci_remap_iospace - Remap the memory mapped I/O space
3026 *	@res: Resource describing the I/O space
3027 *	@phys_addr: physical address of range to be mapped
3028 *
3029 *	Remap the memory mapped I/O space described by the @res
3030 *	and the CPU physical address @phys_addr into virtual address space.
3031 *	Only architectures that have memory mapped IO functions defined
3032 *	(and the PCI_IOBASE value defined) should call this function.
3033 */
3034int __weak pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3035{
3036#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3037	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3038
3039	if (!(res->flags & IORESOURCE_IO))
3040		return -EINVAL;
3041
3042	if (res->end > IO_SPACE_LIMIT)
3043		return -EINVAL;
3044
3045	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3046				  pgprot_device(PAGE_KERNEL));
3047#else
3048	/* this architecture does not have memory mapped I/O space,
3049	   so this function should never be called */
3050	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3051	return -ENODEV;
3052#endif
3053}
3054
3055static void __pci_set_master(struct pci_dev *dev, bool enable)
3056{
3057	u16 old_cmd, cmd;
3058
3059	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3060	if (enable)
3061		cmd = old_cmd | PCI_COMMAND_MASTER;
3062	else
3063		cmd = old_cmd & ~PCI_COMMAND_MASTER;
3064	if (cmd != old_cmd) {
3065		dev_dbg(&dev->dev, "%s bus mastering\n",
3066			enable ? "enabling" : "disabling");
3067		pci_write_config_word(dev, PCI_COMMAND, cmd);
3068	}
3069	dev->is_busmaster = enable;
3070}
3071
3072/**
3073 * pcibios_setup - process "pci=" kernel boot arguments
3074 * @str: string used to pass in "pci=" kernel boot arguments
3075 *
3076 * Process kernel boot arguments.  This is the default implementation.
3077 * Architecture specific implementations can override this as necessary.
3078 */
3079char * __weak __init pcibios_setup(char *str)
3080{
3081	return str;
3082}
3083
3084/**
3085 * pcibios_set_master - enable PCI bus-mastering for device dev
3086 * @dev: the PCI device to enable
3087 *
3088 * Enables PCI bus-mastering for the device.  This is the default
3089 * implementation.  Architecture specific implementations can override
3090 * this if necessary.
3091 */
3092void __weak pcibios_set_master(struct pci_dev *dev)
3093{
3094	u8 lat;
3095
3096	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3097	if (pci_is_pcie(dev))
3098		return;
3099
3100	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3101	if (lat < 16)
3102		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3103	else if (lat > pcibios_max_latency)
3104		lat = pcibios_max_latency;
3105	else
3106		return;
3107
3108	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3109}
3110
3111/**
3112 * pci_set_master - enables bus-mastering for device dev
3113 * @dev: the PCI device to enable
3114 *
3115 * Enables bus-mastering on the device and calls pcibios_set_master()
3116 * to do the needed arch specific settings.
3117 */
3118void pci_set_master(struct pci_dev *dev)
3119{
3120	__pci_set_master(dev, true);
3121	pcibios_set_master(dev);
3122}
3123EXPORT_SYMBOL(pci_set_master);
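
/*
 * Illustrative sketch (not part of this file): bus mastering must be
 * on before the device can DMA or raise MSI/MSI-X interrupts, so
 * drivers usually call pci_set_master() right after enabling the
 * device.  foo_enable_dma() is hypothetical.
 */
static int foo_enable_dma(struct pci_dev *pdev)
{
	int rc = pci_enable_device(pdev);

	if (rc)
		return rc;
	pci_set_master(pdev);
	return 0;
}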
3124
3125/**
3126 * pci_clear_master - disables bus-mastering for device dev
3127 * @dev: the PCI device to disable
3128 */
3129void pci_clear_master(struct pci_dev *dev)
3130{
3131	__pci_set_master(dev, false);
3132}
3133EXPORT_SYMBOL(pci_clear_master);
3134
3135/**
3136 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3137 * @dev: the PCI device for which MWI is to be enabled
3138 *
3139 * Helper function for pci_set_mwi.
3140 * Originally copied from drivers/net/acenic.c.
3141 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3142 *
3143 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3144 */
3145int pci_set_cacheline_size(struct pci_dev *dev)
3146{
3147	u8 cacheline_size;
3148
3149	if (!pci_cache_line_size)
3150		return -EINVAL;
3151
3152	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3153	   equal to or a multiple of the right value. */
3154	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3155	if (cacheline_size >= pci_cache_line_size &&
3156	    (cacheline_size % pci_cache_line_size) == 0)
3157		return 0;
3158
3159	/* Write the correct value. */
3160	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3161	/* Read it back. */
3162	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3163	if (cacheline_size == pci_cache_line_size)
3164		return 0;
3165
3166	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not supported\n",
3167		   pci_cache_line_size << 2);
3168
3169	return -EINVAL;
3170}
3171EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3172
3173/**
3174 * pci_set_mwi - enables memory-write-invalidate PCI transaction
3175 * @dev: the PCI device for which MWI is enabled
3176 *
3177 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3178 *
3179 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3180 */
3181int pci_set_mwi(struct pci_dev *dev)
3182{
3183#ifdef PCI_DISABLE_MWI
3184	return 0;
3185#else
3186	int rc;
3187	u16 cmd;
3188
3189	rc = pci_set_cacheline_size(dev);
3190	if (rc)
3191		return rc;
3192
3193	pci_read_config_word(dev, PCI_COMMAND, &cmd);
3194	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3195		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
3196		cmd |= PCI_COMMAND_INVALIDATE;
3197		pci_write_config_word(dev, PCI_COMMAND, cmd);
3198	}
3199	return 0;
3200#endif
3201}
3202EXPORT_SYMBOL(pci_set_mwi);
3203
3204/**
3205 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
3206 * @dev: the PCI device for which MWI is enabled
3207 *
3208 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3209 * Callers are not required to check the return value.
3210 *
3211 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3212 */
3213int pci_try_set_mwi(struct pci_dev *dev)
3214{
3215#ifdef PCI_DISABLE_MWI
3216	return 0;
3217#else
3218	return pci_set_mwi(dev);
3219#endif
3220}
3221EXPORT_SYMBOL(pci_try_set_mwi);
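
/*
 * Illustrative sketch (not part of this file): MWI is only an
 * optimization, so callers that don't care about the outcome use the
 * _try_ variant and discard its result.  foo_tune() is hypothetical.
 */
static void foo_tune(struct pci_dev *pdev)
{
	(void)pci_try_set_mwi(pdev);	/* best effort; failure is fine */
}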
3222
3223/**
3224 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3225 * @dev: the PCI device to disable
3226 *
3227 * Disables PCI Memory-Write-Invalidate transaction on the device
3228 */
3229void pci_clear_mwi(struct pci_dev *dev)
3230{
3231#ifndef PCI_DISABLE_MWI
3232	u16 cmd;
3233
3234	pci_read_config_word(dev, PCI_COMMAND, &cmd);
3235	if (cmd & PCI_COMMAND_INVALIDATE) {
3236		cmd &= ~PCI_COMMAND_INVALIDATE;
3237		pci_write_config_word(dev, PCI_COMMAND, cmd);
3238	}
3239#endif
3240}
3241EXPORT_SYMBOL(pci_clear_mwi);
3242
3243/**
3244 * pci_intx - enables/disables PCI INTx for device dev
3245 * @pdev: the PCI device to operate on
3246 * @enable: boolean: whether to enable or disable PCI INTx
3247 *
3248 * Enables/disables PCI INTx for device dev
3249 */
3250void pci_intx(struct pci_dev *pdev, int enable)
3251{
3252	u16 pci_command, new;
3253
3254	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3255
3256	if (enable)
3257		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3258	else
3259		new = pci_command | PCI_COMMAND_INTX_DISABLE;
3260
3261	if (new != pci_command) {
3262		struct pci_devres *dr;
3263
3264		pci_write_config_word(pdev, PCI_COMMAND, new);
3265
3266		dr = find_pci_dr(pdev);
3267		if (dr && !dr->restore_intx) {
3268			dr->restore_intx = 1;
3269			dr->orig_intx = !enable;
3270		}
3271	}
3272}
3273EXPORT_SYMBOL_GPL(pci_intx);
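
/*
 * Illustrative sketch (not part of this file): a driver can fence off
 * legacy interrupt delivery around a critical section with pci_intx().
 * foo_quiesce()/foo_unquiesce() are hypothetical.
 */
static void foo_quiesce(struct pci_dev *pdev)
{
	pci_intx(pdev, 0);	/* set INTx Disable in the command word */
}

static void foo_unquiesce(struct pci_dev *pdev)
{
	pci_intx(pdev, 1);	/* clear INTx Disable again */
}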
3274
3275/**
3276 * pci_intx_mask_supported - probe for INTx masking support
3277 * @dev: the PCI device to operate on
3278 *
3279 * Check if the device dev supports INTx masking via the config space
3280 * command word.
3281 */
3282bool pci_intx_mask_supported(struct pci_dev *dev)
3283{
3284	bool mask_supported = false;
3285	u16 orig, new;
3286
3287	if (dev->broken_intx_masking)
3288		return false;
3289
3290	pci_cfg_access_lock(dev);
3291
3292	pci_read_config_word(dev, PCI_COMMAND, &orig);
3293	pci_write_config_word(dev, PCI_COMMAND,
3294			      orig ^ PCI_COMMAND_INTX_DISABLE);
3295	pci_read_config_word(dev, PCI_COMMAND, &new);
3296
3297	/*
3298	 * There's no way to protect against hardware bugs or detect them
3299	 * reliably, but as long as we know what the value should be, let's
3300	 * go ahead and check it.
3301	 */
3302	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
3303		dev_err(&dev->dev, "Command register changed from 0x%x to 0x%x: driver or hardware bug?\n",
3304			orig, new);
3305	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
3306		mask_supported = true;
3307		pci_write_config_word(dev, PCI_COMMAND, orig);
3308	}
3309
3310	pci_cfg_access_unlock(dev);
3311	return mask_supported;
3312}
3313EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3314
3315static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3316{
3317	struct pci_bus *bus = dev->bus;
3318	bool mask_updated = true;
3319	u32 cmd_status_dword;
3320	u16 origcmd, newcmd;
3321	unsigned long flags;
3322	bool irq_pending;
3323
3324	/*
3325	 * We do a single dword read to retrieve both command and status.
3326	 * Document assumptions that make this possible.
3327	 */
3328	BUILD_BUG_ON(PCI_COMMAND % 4);
3329	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3330
3331	raw_spin_lock_irqsave(&pci_lock, flags);
3332
3333	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3334
3335	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3336
3337	/*
3338	 * Check interrupt status register to see whether our device
3339	 * triggered the interrupt (when masking) or the next IRQ is
3340	 * already pending (when unmasking).
3341	 */
3342	if (mask != irq_pending) {
3343		mask_updated = false;
3344		goto done;
3345	}
3346
3347	origcmd = cmd_status_dword;
3348	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3349	if (mask)
3350		newcmd |= PCI_COMMAND_INTX_DISABLE;
3351	if (newcmd != origcmd)
3352		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3353
3354done:
3355	raw_spin_unlock_irqrestore(&pci_lock, flags);
3356
3357	return mask_updated;
3358}
3359
3360/**
3361 * pci_check_and_mask_intx - mask INTx on pending interrupt
3362 * @dev: the PCI device to operate on
3363 *
3364 * Check if the device dev has its INTx line asserted, mask it and
3365 * return true in that case. False is returned if no interrupt was
3366 * pending.
3367 */
3368bool pci_check_and_mask_intx(struct pci_dev *dev)
3369{
3370	return pci_check_and_set_intx_mask(dev, true);
3371}
3372EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3373
3374/**
3375 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3376 * @dev: the PCI device to operate on
3377 *
3378 * Check if the device dev has its INTx line asserted, unmask it if not
3379 * and return true. False is returned and the mask remains active if
3380 * there was still an interrupt pending.
3381 */
3382bool pci_check_and_unmask_intx(struct pci_dev *dev)
3383{
3384	return pci_check_and_set_intx_mask(dev, false);
3385}
3386EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
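
/*
 * Illustrative sketch (not part of this file): the mask/unmask pair
 * above supports servicing a shared, level-triggered INTx from a
 * threaded handler registered with request_threaded_irq() and
 * IRQF_SHARED, similar to what uio_pci_generic does.  The foo_*
 * names are hypothetical.
 */
static irqreturn_t foo_hardirq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* Mask only if our device actually asserted the line. */
	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;
	return IRQ_WAKE_THREAD;
}

static irqreturn_t foo_thread(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* ... service the device here, then re-enable its interrupt. */
	pci_check_and_unmask_intx(pdev);
	return IRQ_HANDLED;
}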
3387
3388/**
3389 * pci_wait_for_pending_transaction - waits for pending transaction
3390 * @dev: the PCI device to operate on
3391 *
3392 * Return 0 if the transaction is still pending, 1 otherwise.
3393 */
3394int pci_wait_for_pending_transaction(struct pci_dev *dev)
3395{
3396	if (!pci_is_pcie(dev))
3397		return 1;
3398
3399	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3400				    PCI_EXP_DEVSTA_TRPND);
3401}
3402EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3403
3404/*
3405 * We should only need to wait 100ms after FLR, but some devices take longer.
3406 * Wait for up to 1000ms for config space to return something other than -1.
3407 * Intel IGD requires this when an LCD panel is attached.  We read the 2nd
3408 * dword because VFs don't implement the 1st dword.
3409 */
3410static void pci_flr_wait(struct pci_dev *dev)
3411{
3412	int i = 0;
3413	u32 id;
3414
3415	do {
3416		msleep(100);
3417		pci_read_config_dword(dev, PCI_COMMAND, &id);
3418	} while (i++ < 10 && id == ~0);
3419
3420	if (id == ~0)
3421		dev_warn(&dev->dev, "Failed to return from FLR\n");
3422	else if (i > 1)
3423		dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
3424			 (i - 1) * 100);
3425}
3426
3427static int pcie_flr(struct pci_dev *dev, int probe)
3428{
3429	u32 cap;
3430
3431	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
3432	if (!(cap & PCI_EXP_DEVCAP_FLR))
3433		return -ENOTTY;
3434
3435	if (probe)
3436		return 0;
3437
3438	if (!pci_wait_for_pending_transaction(dev))
3439		dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
3440
3441	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
3442	pci_flr_wait(dev);
3443	return 0;
3444}
3445
3446static int pci_af_flr(struct pci_dev *dev, int probe)
3447{
3448	int pos;
3449	u8 cap;
3450
3451	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
3452	if (!pos)
3453		return -ENOTTY;
3454
3455	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
3456	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
3457		return -ENOTTY;
3458
3459	if (probe)
3460		return 0;
3461
3462	/*
3463	 * Wait for Transaction Pending bit to clear.  A word-aligned test
3464	 * is used, so we use the control offset rather than status and shift
3465	 * the test bit to match.
3466	 */
3467	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3468				 PCI_AF_STATUS_TP << 8))
3469		dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
3470
3471	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
3472	pci_flr_wait(dev);
3473	return 0;
3474}
3475
3476/**
3477 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
3478 * @dev: Device to reset.
3479 * @probe: If set, only check if the device can be reset this way.
3480 *
3481 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
3482 * unset, it will be reinitialized internally when going from PCI_D3hot to
3483 * PCI_D0.  If that's the case and the device is not in a low-power state
3484 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
3485 *
3486 * NOTE: This causes the caller to sleep for twice the device power transition
3487 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
3488 * by default (i.e. unless the @dev's d3_delay field has a different value).
3489 * Moreover, only devices in D0 can be reset by this function.
3490 */
3491static int pci_pm_reset(struct pci_dev *dev, int probe)
3492{
3493	u16 csr;
3494
3495	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
3496		return -ENOTTY;
3497
3498	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
3499	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
3500		return -ENOTTY;
3501
3502	if (probe)
3503		return 0;
3504
3505	if (dev->current_state != PCI_D0)
3506		return -EINVAL;
3507
3508	csr &= ~PCI_PM_CTRL_STATE_MASK;
3509	csr |= PCI_D3hot;
3510	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3511	pci_dev_d3_sleep(dev);
3512
3513	csr &= ~PCI_PM_CTRL_STATE_MASK;
3514	csr |= PCI_D0;
3515	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
3516	pci_dev_d3_sleep(dev);
3517
3518	return 0;
3519}
3520
3521void pci_reset_secondary_bus(struct pci_dev *dev)
3522{
3523	u16 ctrl;
3524
3525	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
3526	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
3527	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3528	/*
3529	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
3530	 * this to 2ms to ensure that we meet the minimum requirement.
3531	 */
3532	msleep(2);
3533
3534	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
3535	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
3536
3537	/*
3538	 * Trhfa for conventional PCI is 2^25 clock cycles.
3539	 * Assuming a minimum 33MHz clock this results in a 1s
3540	 * delay before we can consider subordinate devices to
3541	 * be re-initialized.  PCIe has some ways to shorten this,
3542	 * but we don't make use of them yet.
3543	 */
3544	ssleep(1);
3545}
3546
3547void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
3548{
3549	pci_reset_secondary_bus(dev);
3550}
3551
3552/**
3553 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
3554 * @dev: Bridge device
3555 *
3556 * Use the bridge control register to assert reset on the secondary bus.
3557 * Devices on the secondary bus are left in power-on state.
3558 */
3559void pci_reset_bridge_secondary_bus(struct pci_dev *dev)
3560{
3561	pcibios_reset_secondary_bus(dev);
3562}
3563EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
3564
3565static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
3566{
3567	struct pci_dev *pdev;
3568
3569	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
3570	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3571		return -ENOTTY;
3572
3573	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3574		if (pdev != dev)
3575			return -ENOTTY;
3576
3577	if (probe)
3578		return 0;
3579
3580	pci_reset_bridge_secondary_bus(dev->bus->self);
3581
3582	return 0;
3583}
3584
3585static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
3586{
3587	int rc = -ENOTTY;
3588
3589	if (!hotplug || !try_module_get(hotplug->ops->owner))
3590		return rc;
3591
3592	if (hotplug->ops->reset_slot)
3593		rc = hotplug->ops->reset_slot(hotplug, probe);
3594
3595	module_put(hotplug->ops->owner);
3596
3597	return rc;
3598}
3599
3600static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
3601{
3602	struct pci_dev *pdev;
3603
3604	if (dev->subordinate || !dev->slot ||
3605	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
3606		return -ENOTTY;
3607
3608	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
3609		if (pdev != dev && pdev->slot == dev->slot)
3610			return -ENOTTY;
3611
3612	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
3613}
3614
3615static int __pci_dev_reset(struct pci_dev *dev, int probe)
3616{
3617	int rc;
3618
3619	might_sleep();
3620
3621	rc = pci_dev_specific_reset(dev, probe);
3622	if (rc != -ENOTTY)
3623		goto done;
3624
3625	rc = pcie_flr(dev, probe);
3626	if (rc != -ENOTTY)
3627		goto done;
3628
3629	rc = pci_af_flr(dev, probe);
3630	if (rc != -ENOTTY)
3631		goto done;
3632
3633	rc = pci_pm_reset(dev, probe);
3634	if (rc != -ENOTTY)
3635		goto done;
3636
3637	rc = pci_dev_reset_slot_function(dev, probe);
3638	if (rc != -ENOTTY)
3639		goto done;
3640
3641	rc = pci_parent_bus_reset(dev, probe);
3642done:
3643	return rc;
3644}
3645
3646static void pci_dev_lock(struct pci_dev *dev)
3647{
3648	pci_cfg_access_lock(dev);
3649	/* block PM suspend, driver probe, etc. */
3650	device_lock(&dev->dev);
3651}
3652
3653/* Return 1 on successful lock, 0 on contention */
3654static int pci_dev_trylock(struct pci_dev *dev)
3655{
3656	if (pci_cfg_access_trylock(dev)) {
3657		if (device_trylock(&dev->dev))
3658			return 1;
3659		pci_cfg_access_unlock(dev);
3660	}
3661
3662	return 0;
3663}
3664
3665static void pci_dev_unlock(struct pci_dev *dev)
3666{
3667	device_unlock(&dev->dev);
3668	pci_cfg_access_unlock(dev);
3669}
3670
3671/**
3672 * pci_reset_notify - notify device driver of reset
3673 * @dev: device to be notified of reset
3674 * @prepare: 'true' if device is about to be reset; 'false' if reset attempt
3675 *           completed
3676 *
3677 * Must be called prior to device access being disabled and after device
3678 * access is restored.
3679 */
3680static void pci_reset_notify(struct pci_dev *dev, bool prepare)
3681{
3682	const struct pci_error_handlers *err_handler =
3683			dev->driver ? dev->driver->err_handler : NULL;
3684	if (err_handler && err_handler->reset_notify)
3685		err_handler->reset_notify(dev, prepare);
3686}
3687
3688static void pci_dev_save_and_disable(struct pci_dev *dev)
3689{
3690	pci_reset_notify(dev, true);
3691
3692	/*
3693	 * Wake-up device prior to save.  PM registers default to D0 after
3694	 * reset and a simple register restore doesn't reliably return
3695	 * to a non-D0 state anyway.
3696	 */
3697	pci_set_power_state(dev, PCI_D0);
3698
3699	pci_save_state(dev);
3700	/*
3701	 * Disable the device by clearing the Command register, except for
3702	 * INTx-disable which is set.  This not only disables MMIO and I/O port
3703	 * BARs, but also prevents the device from being Bus Master, preventing
3704	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
3705	 * compliant devices, INTx-disable prevents legacy interrupts.
3706	 */
3707	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3708}
3709
3710static void pci_dev_restore(struct pci_dev *dev)
3711{
3712	pci_restore_state(dev);
3713	pci_reset_notify(dev, false);
3714}
3715
3716static int pci_dev_reset(struct pci_dev *dev, int probe)
3717{
3718	int rc;
3719
3720	if (!probe)
3721		pci_dev_lock(dev);
3722
3723	rc = __pci_dev_reset(dev, probe);
3724
3725	if (!probe)
3726		pci_dev_unlock(dev);
3727
3728	return rc;
3729}
3730
3731/**
3732 * __pci_reset_function - reset a PCI device function
3733 * @dev: PCI device to reset
3734 *
3735 * Some devices allow an individual function to be reset without affecting
3736 * other functions in the same device.  The PCI device must be responsive
3737 * to PCI config space in order to use this function.
3738 *
3739 * The device function is presumed to be unused when this function is called.
3740 * Resetting the device will make the contents of PCI configuration space
3741 * random, so any caller of this must be prepared to reinitialise the
3742 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3743 * etc.
3744 *
3745 * Returns 0 if the device function was successfully reset or negative if the
3746 * device doesn't support resetting a single function.
3747 */
3748int __pci_reset_function(struct pci_dev *dev)
3749{
3750	return pci_dev_reset(dev, 0);
3751}
3752EXPORT_SYMBOL_GPL(__pci_reset_function);
3753
3754/**
3755 * __pci_reset_function_locked - reset a PCI device function while holding
3756 * the @dev mutex lock.
3757 * @dev: PCI device to reset
3758 *
3759 * Some devices allow an individual function to be reset without affecting
3760 * other functions in the same device.  The PCI device must be responsive
3761 * to PCI config space in order to use this function.
3762 *
3763 * The device function is presumed to be unused and the caller is holding
3764 * the device mutex lock when this function is called.
3765 * Resetting the device will make the contents of PCI configuration space
3766 * random, so any caller of this must be prepared to reinitialise the
3767 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3768 * etc.
3769 *
3770 * Returns 0 if the device function was successfully reset or negative if the
3771 * device doesn't support resetting a single function.
3772 */
3773int __pci_reset_function_locked(struct pci_dev *dev)
3774{
3775	return __pci_dev_reset(dev, 0);
3776}
3777EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3778
3779/**
3780 * pci_probe_reset_function - check whether the device can be safely reset
3781 * @dev: PCI device to reset
3782 *
3783 * Some devices allow an individual function to be reset without affecting
3784 * other functions in the same device.  The PCI device must be responsive
3785 * to PCI config space in order to use this function.
3786 *
3787 * Returns 0 if the device function can be reset or negative if the
3788 * device doesn't support resetting a single function.
3789 */
3790int pci_probe_reset_function(struct pci_dev *dev)
3791{
3792	return pci_dev_reset(dev, 1);
3793}
3794
3795/**
3796 * pci_reset_function - quiesce and reset a PCI device function
3797 * @dev: PCI device to reset
3798 *
3799 * Some devices allow an individual function to be reset without affecting
3800 * other functions in the same device.  The PCI device must be responsive
3801 * to PCI config space in order to use this function.
3802 *
3803 * This function does not just reset the PCI portion of a device, but
3804 * clears all the state associated with the device.  This function differs
3805 * from __pci_reset_function in that it saves and restores device state
3806 * over the reset.
3807 *
3808 * Returns 0 if the device function was successfully reset or negative if the
3809 * device doesn't support resetting a single function.
3810 */
3811int pci_reset_function(struct pci_dev *dev)
3812{
3813	int rc;
3814
3815	rc = pci_dev_reset(dev, 1);
3816	if (rc)
3817		return rc;
3818
3819	pci_dev_save_and_disable(dev);
3820
3821	rc = pci_dev_reset(dev, 0);
3822
3823	pci_dev_restore(dev);
3824
3825	return rc;
3826}
3827EXPORT_SYMBOL_GPL(pci_reset_function);
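
/*
 * Usage sketch (hypothetical driver code, not part of this file): an
 * error-recovery path can call pci_reset_function() directly, since it
 * saves and restores config space around the reset.  The mydrv_* name
 * is illustrative only.
 *
 *	static int mydrv_recover(struct pci_dev *pdev)
 *	{
 *		int rc = pci_reset_function(pdev);
 *
 *		if (rc)
 *			dev_err(&pdev->dev, "reset failed: %d\n", rc);
 *		return rc;
 *	}
 */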
3828
3829/**
3830 * pci_try_reset_function - quiesce and reset a PCI device function
3831 * @dev: PCI device to reset
3832 *
3833 * Same as above, except return -EAGAIN if unable to lock device.
3834 */
3835int pci_try_reset_function(struct pci_dev *dev)
3836{
3837	int rc;
3838
3839	rc = pci_dev_reset(dev, 1);
3840	if (rc)
3841		return rc;
3842
3843	pci_dev_save_and_disable(dev);
3844
3845	if (pci_dev_trylock(dev)) {
3846		rc = __pci_dev_reset(dev, 0);
3847		pci_dev_unlock(dev);
3848	} else
3849		rc = -EAGAIN;
3850
3851	pci_dev_restore(dev);
3852
3853	return rc;
3854}
3855EXPORT_SYMBOL_GPL(pci_try_reset_function);
3856
3857/* Do any devices on or below this bus prevent a bus reset? */
3858static bool pci_bus_resetable(struct pci_bus *bus)
3859{
3860	struct pci_dev *dev;
3861
3862	list_for_each_entry(dev, &bus->devices, bus_list) {
3863		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3864		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3865			return false;
3866	}
3867
3868	return true;
3869}
3870
3871/* Lock devices from the top of the tree down */
3872static void pci_bus_lock(struct pci_bus *bus)
3873{
3874	struct pci_dev *dev;
3875
3876	list_for_each_entry(dev, &bus->devices, bus_list) {
3877		pci_dev_lock(dev);
3878		if (dev->subordinate)
3879			pci_bus_lock(dev->subordinate);
3880	}
3881}
3882
3883/* Unlock devices from the bottom of the tree up */
3884static void pci_bus_unlock(struct pci_bus *bus)
3885{
3886	struct pci_dev *dev;
3887
3888	list_for_each_entry(dev, &bus->devices, bus_list) {
3889		if (dev->subordinate)
3890			pci_bus_unlock(dev->subordinate);
3891		pci_dev_unlock(dev);
3892	}
3893}
3894
3895/* Return 1 on successful lock, 0 on contention */
3896static int pci_bus_trylock(struct pci_bus *bus)
3897{
3898	struct pci_dev *dev;
3899
3900	list_for_each_entry(dev, &bus->devices, bus_list) {
3901		if (!pci_dev_trylock(dev))
3902			goto unlock;
3903		if (dev->subordinate) {
3904			if (!pci_bus_trylock(dev->subordinate)) {
3905				pci_dev_unlock(dev);
3906				goto unlock;
3907			}
3908		}
3909	}
3910	return 1;
3911
3912unlock:
3913	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
3914		if (dev->subordinate)
3915			pci_bus_unlock(dev->subordinate);
3916		pci_dev_unlock(dev);
3917	}
3918	return 0;
3919}
3920
3921/* Do any devices on or below this slot prevent a bus reset? */
3922static bool pci_slot_resetable(struct pci_slot *slot)
3923{
3924	struct pci_dev *dev;
3925
3926	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3927		if (!dev->slot || dev->slot != slot)
3928			continue;
3929		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
3930		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
3931			return false;
3932	}
3933
3934	return true;
3935}
3936
3937/* Lock devices from the top of the tree down */
3938static void pci_slot_lock(struct pci_slot *slot)
3939{
3940	struct pci_dev *dev;
3941
3942	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3943		if (!dev->slot || dev->slot != slot)
3944			continue;
3945		pci_dev_lock(dev);
3946		if (dev->subordinate)
3947			pci_bus_lock(dev->subordinate);
3948	}
3949}
3950
3951/* Unlock devices from the bottom of the tree up */
3952static void pci_slot_unlock(struct pci_slot *slot)
3953{
3954	struct pci_dev *dev;
3955
3956	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3957		if (!dev->slot || dev->slot != slot)
3958			continue;
3959		if (dev->subordinate)
3960			pci_bus_unlock(dev->subordinate);
3961		pci_dev_unlock(dev);
3962	}
3963}
3964
3965/* Return 1 on successful lock, 0 on contention */
3966static int pci_slot_trylock(struct pci_slot *slot)
3967{
3968	struct pci_dev *dev;
3969
3970	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
3971		if (!dev->slot || dev->slot != slot)
3972			continue;
3973		if (!pci_dev_trylock(dev))
3974			goto unlock;
3975		if (dev->subordinate) {
3976			if (!pci_bus_trylock(dev->subordinate)) {
3977				pci_dev_unlock(dev);
3978				goto unlock;
3979			}
3980		}
3981	}
3982	return 1;
3983
3984unlock:
3985	list_for_each_entry_continue_reverse(dev,
3986					     &slot->bus->devices, bus_list) {
3987		if (!dev->slot || dev->slot != slot)
3988			continue;
3989		if (dev->subordinate)
3990			pci_bus_unlock(dev->subordinate);
3991		pci_dev_unlock(dev);
3992	}
3993	return 0;
3994}
3995
3996/* Save and disable devices from the top of the tree down */
3997static void pci_bus_save_and_disable(struct pci_bus *bus)
3998{
3999	struct pci_dev *dev;
4000
4001	list_for_each_entry(dev, &bus->devices, bus_list) {
4002		pci_dev_save_and_disable(dev);
4003		if (dev->subordinate)
4004			pci_bus_save_and_disable(dev->subordinate);
4005	}
4006}
4007
4008/*
4009 * Restore devices from top of the tree down - parent bridges need to be
4010 * restored before we can get to subordinate devices.
4011 */
4012static void pci_bus_restore(struct pci_bus *bus)
4013{
4014	struct pci_dev *dev;
4015
4016	list_for_each_entry(dev, &bus->devices, bus_list) {
4017		pci_dev_restore(dev);
4018		if (dev->subordinate)
4019			pci_bus_restore(dev->subordinate);
4020	}
4021}
4022
4023/* Save and disable devices from the top of the tree down */
4024static void pci_slot_save_and_disable(struct pci_slot *slot)
4025{
4026	struct pci_dev *dev;
4027
4028	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4029		if (!dev->slot || dev->slot != slot)
4030			continue;
4031		pci_dev_save_and_disable(dev);
4032		if (dev->subordinate)
4033			pci_bus_save_and_disable(dev->subordinate);
4034	}
4035}
4036
4037/*
4038 * Restore devices from top of the tree down - parent bridges need to be
4039 * restored before we can get to subordinate devices.
4040 */
4041static void pci_slot_restore(struct pci_slot *slot)
4042{
4043	struct pci_dev *dev;
4044
4045	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4046		if (!dev->slot || dev->slot != slot)
4047			continue;
4048		pci_dev_restore(dev);
4049		if (dev->subordinate)
4050			pci_bus_restore(dev->subordinate);
4051	}
4052}
4053
4054static int pci_slot_reset(struct pci_slot *slot, int probe)
4055{
4056	int rc;
4057
4058	if (!slot || !pci_slot_resetable(slot))
4059		return -ENOTTY;
4060
4061	if (!probe)
4062		pci_slot_lock(slot);
4063
4064	might_sleep();
4065
4066	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4067
4068	if (!probe)
4069		pci_slot_unlock(slot);
4070
4071	return rc;
4072}
4073
4074/**
4075 * pci_probe_reset_slot - probe whether a PCI slot can be reset
4076 * @slot: PCI slot to probe
4077 *
4078 * Return 0 if slot can be reset, negative if a slot reset is not supported.
4079 */
4080int pci_probe_reset_slot(struct pci_slot *slot)
4081{
4082	return pci_slot_reset(slot, 1);
4083}
4084EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4085
4086/**
4087 * pci_reset_slot - reset a PCI slot
4088 * @slot: PCI slot to reset
4089 *
4090 * A PCI bus may host multiple slots; each slot may support a reset
4091 * mechanism independent of other slots.  For instance, some slots may
4092 * support slot power control.  In a 1:1 bus-to-slot architecture, this
4093 * function may wrap the bus reset to avoid spurious slot-related events
4094 * such as hotplug.  Generally a slot reset should be attempted before a
4095 * bus reset.  All functions of the slot and any subordinate buses behind
4096 * the slot are reset through this function.  PCI config space of all
4097 * devices in the slot and behind it is saved before and restored after reset.
4098 *
4099 * Return 0 on success, non-zero on error.
4100 */
4101int pci_reset_slot(struct pci_slot *slot)
4102{
4103	int rc;
4104
4105	rc = pci_slot_reset(slot, 1);
4106	if (rc)
4107		return rc;
4108
4109	pci_slot_save_and_disable(slot);
4110
4111	rc = pci_slot_reset(slot, 0);
4112
4113	pci_slot_restore(slot);
4114
4115	return rc;
4116}
4117EXPORT_SYMBOL_GPL(pci_reset_slot);
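
/*
 * Usage sketch (hypothetical, not part of this file): per the comment
 * above, prefer a slot reset and fall back to a bus reset when no slot
 * reset method is available.
 *
 *	static int mydrv_reset_subtree(struct pci_dev *pdev)
 *	{
 *		if (pdev->slot && !pci_probe_reset_slot(pdev->slot))
 *			return pci_reset_slot(pdev->slot);
 *		if (!pci_probe_reset_bus(pdev->bus))
 *			return pci_reset_bus(pdev->bus);
 *		return -ENOTTY;
 *	}
 */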
4118
4119/**
4120 * pci_try_reset_slot - Try to reset a PCI slot
4121 * @slot: PCI slot to reset
4122 *
4123 * Same as above, except return -EAGAIN if the slot cannot be locked.
4124 */
4125int pci_try_reset_slot(struct pci_slot *slot)
4126{
4127	int rc;
4128
4129	rc = pci_slot_reset(slot, 1);
4130	if (rc)
4131		return rc;
4132
4133	pci_slot_save_and_disable(slot);
4134
4135	if (pci_slot_trylock(slot)) {
4136		might_sleep();
4137		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4138		pci_slot_unlock(slot);
4139	} else
4140		rc = -EAGAIN;
4141
4142	pci_slot_restore(slot);
4143
4144	return rc;
4145}
4146EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4147
4148static int pci_bus_reset(struct pci_bus *bus, int probe)
4149{
4150	if (!bus->self || !pci_bus_resetable(bus))
4151		return -ENOTTY;
4152
4153	if (probe)
4154		return 0;
4155
4156	pci_bus_lock(bus);
4157
4158	might_sleep();
4159
4160	pci_reset_bridge_secondary_bus(bus->self);
4161
4162	pci_bus_unlock(bus);
4163
4164	return 0;
4165}
4166
4167/**
4168 * pci_probe_reset_bus - probe whether a PCI bus can be reset
4169 * @bus: PCI bus to probe
4170 *
4171 * Return 0 if bus can be reset, negative if a bus reset is not supported.
4172 */
4173int pci_probe_reset_bus(struct pci_bus *bus)
4174{
4175	return pci_bus_reset(bus, 1);
4176}
4177EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4178
4179/**
4180 * pci_reset_bus - reset a PCI bus
4181 * @bus: top level PCI bus to reset
4182 *
4183 * Do a bus reset on the given bus and any subordinate buses, saving
4184 * and restoring state of all devices.
4185 *
4186 * Return 0 on success, non-zero on error.
4187 */
4188int pci_reset_bus(struct pci_bus *bus)
4189{
4190	int rc;
4191
4192	rc = pci_bus_reset(bus, 1);
4193	if (rc)
4194		return rc;
4195
4196	pci_bus_save_and_disable(bus);
4197
4198	rc = pci_bus_reset(bus, 0);
4199
4200	pci_bus_restore(bus);
4201
4202	return rc;
4203}
4204EXPORT_SYMBOL_GPL(pci_reset_bus);
4205
4206/**
4207 * pci_try_reset_bus - Try to reset a PCI bus
4208 * @bus: top level PCI bus to reset
4209 *
4210 * Same as above, except return -EAGAIN if the bus cannot be locked.
4211 */
4212int pci_try_reset_bus(struct pci_bus *bus)
4213{
4214	int rc;
4215
4216	rc = pci_bus_reset(bus, 1);
4217	if (rc)
4218		return rc;
4219
4220	pci_bus_save_and_disable(bus);
4221
4222	if (pci_bus_trylock(bus)) {
4223		might_sleep();
4224		pci_reset_bridge_secondary_bus(bus->self);
4225		pci_bus_unlock(bus);
4226	} else
4227		rc = -EAGAIN;
4228
4229	pci_bus_restore(bus);
4230
4231	return rc;
4232}
4233EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4234
4235/**
4236 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4237 * @dev: PCI device to query
4238 *
4239 * Returns mmrbc: maximum designed memory read count in bytes
4240 *    or appropriate error value.
4241 */
4242int pcix_get_max_mmrbc(struct pci_dev *dev)
4243{
4244	int cap;
4245	u32 stat;
4246
4247	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4248	if (!cap)
4249		return -EINVAL;
4250
4251	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4252		return -EINVAL;
4253
4254	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4255}
4256EXPORT_SYMBOL(pcix_get_max_mmrbc);
4257
4258/**
4259 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4260 * @dev: PCI device to query
4261 *
4262 * Returns mmrbc: maximum memory read count in bytes
4263 *    or appropriate error value.
4264 */
4265int pcix_get_mmrbc(struct pci_dev *dev)
4266{
4267	int cap;
4268	u16 cmd;
4269
4270	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4271	if (!cap)
4272		return -EINVAL;
4273
4274	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4275		return -EINVAL;
4276
4277	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4278}
4279EXPORT_SYMBOL(pcix_get_mmrbc);
4280
4281/**
4282 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4283 * @dev: PCI device to query
4284 * @mmrbc: maximum memory read count in bytes
4285 *    valid values are 512, 1024, 2048, 4096
4286 *
4287 * If possible, sets the maximum memory read byte count; some bridges have
4288 * errata that prevent this.
4289 */
4290int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4291{
4292	int cap;
4293	u32 stat, v, o;
4294	u16 cmd;
4295
4296	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4297		return -EINVAL;
4298
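	/*
	 * Encode the byte count as an exponent relative to 512 bytes:
	 * ffs(512) == 10, so 512 -> 0, 1024 -> 1, 2048 -> 2, 4096 -> 3,
	 * matching the maximum memory read byte count field in the PCI-X
	 * command register.
	 */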
4299	v = ffs(mmrbc) - 10;
4300
4301	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4302	if (!cap)
4303		return -EINVAL;
4304
4305	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4306		return -EINVAL;
4307
4308	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4309		return -E2BIG;
4310
4311	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4312		return -EINVAL;
4313
4314	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4315	if (o != v) {
4316		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4317			return -EIO;
4318
4319		cmd &= ~PCI_X_CMD_MAX_READ;
4320		cmd |= v << 2;
4321		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4322			return -EIO;
4323	}
4324	return 0;
4325}
4326EXPORT_SYMBOL(pcix_set_mmrbc);
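
/*
 * Usage sketch (hypothetical, not part of this file): raise the PCI-X
 * maximum memory read byte count as far as the device's design allows.
 *
 *	static void mydrv_tune_mmrbc(struct pci_dev *pdev)
 *	{
 *		int max = pcix_get_max_mmrbc(pdev);
 *
 *		if (max > 0 && pcix_get_mmrbc(pdev) < max)
 *			pcix_set_mmrbc(pdev, max);
 *	}
 */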
4327
4328/**
4329 * pcie_get_readrq - get PCI Express read request size
4330 * @dev: PCI device to query
4331 *
4332 * Returns maximum memory read request in bytes
4333 *    or appropriate error value.
4334 */
4335int pcie_get_readrq(struct pci_dev *dev)
4336{
4337	u16 ctl;
4338
4339	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4340
4341	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4342}
4343EXPORT_SYMBOL(pcie_get_readrq);
4344
4345/**
4346 * pcie_set_readrq - set PCI Express maximum memory read request
4347 * @dev: PCI device to query
4348 * @rq: maximum memory read count in bytes
4349 *    valid values are 128, 256, 512, 1024, 2048, 4096
4350 *
4351 * If possible, sets the maximum memory read request size in bytes.
4352 */
4353int pcie_set_readrq(struct pci_dev *dev, int rq)
4354{
4355	u16 v;
4356
4357	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
4358		return -EINVAL;
4359
4360	/*
4361	 * If using the "performance" PCIe config, we clamp the
4362	 * read request size to the max payload size to prevent the
4363	 * host bridge from generating requests larger than we can
4364	 * cope with.
4365	 */
4366	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
4367		int mps = pcie_get_mps(dev);
4368
4369		if (mps < rq)
4370			rq = mps;
4371	}
4372
4373	v = (ffs(rq) - 8) << 12;
4374
4375	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4376						  PCI_EXP_DEVCTL_READRQ, v);
4377}
4378EXPORT_SYMBOL(pcie_set_readrq);
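
/*
 * Usage sketch (hypothetical, not part of this file): a driver working
 * around a device erratum by capping the read request size.
 *
 *	static void mydrv_limit_readrq(struct pci_dev *pdev)
 *	{
 *		if (pcie_get_readrq(pdev) > 256)
 *			pcie_set_readrq(pdev, 256);
 *	}
 */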
4379
4380/**
4381 * pcie_get_mps - get PCI Express maximum payload size
4382 * @dev: PCI device to query
4383 *
4384 * Returns maximum payload size in bytes
4385 */
4386int pcie_get_mps(struct pci_dev *dev)
4387{
4388	u16 ctl;
4389
4390	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
4391
4392	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4393}
4394EXPORT_SYMBOL(pcie_get_mps);
4395
4396/**
4397 * pcie_set_mps - set PCI Express maximum payload size
4398 * @dev: PCI device to query
4399 * @mps: maximum payload size in bytes
4400 *    valid values are 128, 256, 512, 1024, 2048, 4096
4401 *
4402 * If possible, sets the maximum payload size.
4403 */
4404int pcie_set_mps(struct pci_dev *dev, int mps)
4405{
4406	u16 v;
4407
4408	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
4409		return -EINVAL;
4410
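	/*
	 * Encode the payload size as an exponent relative to 128 bytes:
	 * ffs(128) == 8, so 128 -> 0, 256 -> 1, ..., 4096 -> 5.
	 * dev->pcie_mpss holds the same exponent from the Device
	 * Capabilities register, so a larger value than that is
	 * unsupported by the device.
	 */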
4411	v = ffs(mps) - 8;
4412	if (v > dev->pcie_mpss)
4413		return -EINVAL;
4414	v <<= 5;
4415
4416	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
4417						  PCI_EXP_DEVCTL_PAYLOAD, v);
4418}
4419EXPORT_SYMBOL(pcie_set_mps);
4420
4421/**
4422 * pcie_get_minimum_link - determine minimum link settings of a PCI device
4423 * @dev: PCI device to query
4424 * @speed: storage for minimum speed
4425 * @width: storage for minimum width
4426 *
4427 * This function will walk up the PCI device chain and determine the minimum
4428 * link width and speed of the device.
4429 */
4430int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
4431			  enum pcie_link_width *width)
4432{
4433	int ret;
4434
4435	*speed = PCI_SPEED_UNKNOWN;
4436	*width = PCIE_LNK_WIDTH_UNKNOWN;
4437
4438	while (dev) {
4439		u16 lnksta;
4440		enum pci_bus_speed next_speed;
4441		enum pcie_link_width next_width;
4442
4443		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
4444		if (ret)
4445			return ret;
4446
4447		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
4448		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
4449			PCI_EXP_LNKSTA_NLW_SHIFT;
4450
4451		if (next_speed < *speed)
4452			*speed = next_speed;
4453
4454		if (next_width < *width)
4455			*width = next_width;
4456
4457		dev = dev->bus->self;
4458	}
4459
4460	return 0;
4461}
4462EXPORT_SYMBOL(pcie_get_minimum_link);
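
/*
 * Usage sketch (hypothetical, not part of this file): warn when the
 * device trained at less than the hoped-for 8 GT/s x8 link.
 *
 *	static void mydrv_check_link(struct pci_dev *pdev)
 *	{
 *		enum pci_bus_speed speed;
 *		enum pcie_link_width width;
 *
 *		if (pcie_get_minimum_link(pdev, &speed, &width))
 *			return;
 *		if (speed < PCIE_SPEED_8_0GT || width < PCIE_LNK_X8)
 *			dev_warn(&pdev->dev, "PCIe link is degraded\n");
 *	}
 */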
4463
4464/**
4465 * pci_select_bars - Make BAR mask from the type of resource
4466 * @dev: the PCI device for which BAR mask is made
4467 * @flags: resource type mask to be selected
4468 *
4469 * This helper routine makes a BAR mask from the resource type.
4470 */
4471int pci_select_bars(struct pci_dev *dev, unsigned long flags)
4472{
4473	int i, bars = 0;
4474	for (i = 0; i < PCI_NUM_RESOURCES; i++)
4475		if (pci_resource_flags(dev, i) & flags)
4476			bars |= (1 << i);
4477	return bars;
4478}
4479EXPORT_SYMBOL(pci_select_bars);
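
/*
 * Usage sketch (hypothetical, not part of this file): a probe routine
 * requesting only the memory BARs of its device.
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "mydrv");
 */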
4480
4481/**
4482 * pci_resource_bar - get position of the BAR associated with a resource
4483 * @dev: the PCI device
4484 * @resno: the resource number
4485 * @type: the BAR type to be filled in
4486 *
4487 * Returns BAR position in config space, or 0 if the BAR is invalid.
4488 */
4489int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
4490{
4491	int reg;
4492
4493	if (resno < PCI_ROM_RESOURCE) {
4494		*type = pci_bar_unknown;
4495		return PCI_BASE_ADDRESS_0 + 4 * resno;
4496	} else if (resno == PCI_ROM_RESOURCE) {
4497		*type = pci_bar_mem32;
4498		return dev->rom_base_reg;
4499	} else if (resno < PCI_BRIDGE_RESOURCES) {
4500		/* device specific resource */
4501		*type = pci_bar_unknown;
4502		reg = pci_iov_resource_bar(dev, resno);
4503		if (reg)
4504			return reg;
4505	}
4506
4507	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
4508	return 0;
4509}
4510
4511/* Some architectures require additional programming to enable VGA */
4512static arch_set_vga_state_t arch_set_vga_state;
4513
4514void __init pci_register_set_vga_state(arch_set_vga_state_t func)
4515{
4516	arch_set_vga_state = func;	/* NULL disables */
4517}
4518
4519static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
4520				  unsigned int command_bits, u32 flags)
4521{
4522	if (arch_set_vga_state)
4523		return arch_set_vga_state(dev, decode, command_bits,
4524						flags);
4525	return 0;
4526}
4527
4528/**
4529 * pci_set_vga_state - set VGA decode state on device and parents if requested
4530 * @dev: the PCI device
4531 * @decode: true = enable decoding, false = disable decoding
4532 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
4533 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE;
4534 *	the latter also traverses ancestor bridges
4535 */
4536int pci_set_vga_state(struct pci_dev *dev, bool decode,
4537		      unsigned int command_bits, u32 flags)
4538{
4539	struct pci_bus *bus;
4540	struct pci_dev *bridge;
4541	u16 cmd;
4542	int rc;
4543
4544	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) &&
		(command_bits & ~(PCI_COMMAND_IO | PCI_COMMAND_MEMORY)));
4545
4546	/* ARCH specific VGA enables */
4547	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
4548	if (rc)
4549		return rc;
4550
4551	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
4552		pci_read_config_word(dev, PCI_COMMAND, &cmd);
4553		if (decode)
4554			cmd |= command_bits;
4555		else
4556			cmd &= ~command_bits;
4557		pci_write_config_word(dev, PCI_COMMAND, cmd);
4558	}
4559
4560	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
4561		return 0;
4562
4563	bus = dev->bus;
4564	while (bus) {
4565		bridge = bus->self;
4566		if (bridge) {
4567			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
4568					     &cmd);
4569			if (decode)
4570				cmd |= PCI_BRIDGE_CTL_VGA;
4571			else
4572				cmd &= ~PCI_BRIDGE_CTL_VGA;
4573			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
4574					      cmd);
4575		}
4576		bus = bus->parent;
4577	}
4578	return 0;
4579}
4580
4581bool pci_device_is_present(struct pci_dev *pdev)
4582{
4583	u32 v;
4584
4585	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
4586}
4587EXPORT_SYMBOL_GPL(pci_device_is_present);
4588
4589void pci_ignore_hotplug(struct pci_dev *dev)
4590{
4591	struct pci_dev *bridge = dev->bus->self;
4592
4593	dev->ignore_hotplug = 1;
4594	/* Propagate the "ignore hotplug" setting to the parent bridge. */
4595	if (bridge)
4596		bridge->ignore_hotplug = 1;
4597}
4598EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
4599
4600#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
4601static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
4602static DEFINE_SPINLOCK(resource_alignment_lock);
4603
4604/**
4605 * pci_specified_resource_alignment - get resource alignment specified by user.
4606 * @dev: the PCI device to query
4607 *
4608 * RETURNS: Resource alignment if it is specified.
4609 *          Zero if it is not specified.
4610 */
4611static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
4612{
4613	int seg, bus, slot, func, align_order, count;
4614	resource_size_t align = 0;
4615	char *p;
4616
4617	spin_lock(&resource_alignment_lock);
4618	p = resource_alignment_param;
4619	while (*p) {
4620		count = 0;
4621		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
4622							p[count] == '@') {
4623			p += count + 1;
4624		} else {
4625			align_order = -1;
4626		}
4627		if (sscanf(p, "%x:%x:%x.%x%n",
4628			&seg, &bus, &slot, &func, &count) != 4) {
4629			seg = 0;
4630			if (sscanf(p, "%x:%x.%x%n",
4631					&bus, &slot, &func, &count) != 3) {
4632				/* Invalid format */
4633				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
4634					p);
4635				break;
4636			}
4637		}
4638		p += count;
4639		if (seg == pci_domain_nr(dev->bus) &&
4640			bus == dev->bus->number &&
4641			slot == PCI_SLOT(dev->devfn) &&
4642			func == PCI_FUNC(dev->devfn)) {
4643			if (align_order == -1)
4644				align = PAGE_SIZE;
4645			else
4646				align = 1 << align_order;
4647			/* Found */
4648			break;
4649		}
4650		if (*p != ';' && *p != ',') {
4651			/* End of param or invalid format */
4652			break;
4653		}
4654		p++;
4655	}
4656	spin_unlock(&resource_alignment_lock);
4657	return align;
4658}
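
/*
 * Example of the accepted format (hypothetical addresses): booting with
 * "pci=resource_alignment=20@0000:01:00.0;13@00:1f.2" requests 2^20
 * (1 MiB) alignment for 0000:01:00.0 and 2^13 (8 KiB) alignment for
 * 00:1f.2.  Omitting the "order@" prefix falls back to PAGE_SIZE
 * alignment for the matched device.
 */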
4659
4660/*
4661 * This function disables memory decoding and releases memory resources
4662 * of the device specified by the kernel's boot parameter
4663 * 'pci=resource_alignment='.  It also rounds the resource size up to the
4664 * specified alignment.  Later on, the kernel will assign page-aligned
4665 * memory resources back to the device.
4666 */
4667void pci_reassigndev_resource_alignment(struct pci_dev *dev)
4668{
4669	int i;
4670	struct resource *r;
4671	resource_size_t align, size;
4672	u16 command;
4673
4674	/* check if specified PCI is target device to reassign */
4675	align = pci_specified_resource_alignment(dev);
4676	if (!align)
4677		return;
4678
4679	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
4680	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
4681		dev_warn(&dev->dev,
4682			"Can't reassign resources to host bridge.\n");
4683		return;
4684	}
4685
4686	dev_info(&dev->dev,
4687		"Disabling memory decoding and releasing memory resources.\n");
4688	pci_read_config_word(dev, PCI_COMMAND, &command);
4689	command &= ~PCI_COMMAND_MEMORY;
4690	pci_write_config_word(dev, PCI_COMMAND, command);
4691
4692	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
4693		r = &dev->resource[i];
4694		if (!(r->flags & IORESOURCE_MEM))
4695			continue;
4696		size = resource_size(r);
4697		if (size < align) {
4698			size = align;
4699			dev_info(&dev->dev,
4700				"Rounding up size of resource #%d to %#llx.\n",
4701				i, (unsigned long long)size);
4702		}
4703		r->flags |= IORESOURCE_UNSET;
4704		r->end = size - 1;
4705		r->start = 0;
4706	}
4707	/*
4708	 * Need to disable the bridge's resource window to let the kernel
4709	 * reassign a new resource window later on.
4710	 */
4711	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
4712	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
4713		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
4714			r = &dev->resource[i];
4715			if (!(r->flags & IORESOURCE_MEM))
4716				continue;
4717			r->flags |= IORESOURCE_UNSET;
4718			r->end = resource_size(r) - 1;
4719			r->start = 0;
4720		}
4721		pci_disable_bridge_window(dev);
4722	}
4723}
4724
4725static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
4726{
4727	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
4728		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
4729	spin_lock(&resource_alignment_lock);
4730	strncpy(resource_alignment_param, buf, count);
4731	resource_alignment_param[count] = '\0';
4732	spin_unlock(&resource_alignment_lock);
4733	return count;
4734}
4735
4736static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
4737{
4738	size_t count;
4739	spin_lock(&resource_alignment_lock);
4740	count = snprintf(buf, size, "%s", resource_alignment_param);
4741	spin_unlock(&resource_alignment_lock);
4742	return count;
4743}
4744
4745static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
4746{
4747	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
4748}
4749
4750static ssize_t pci_resource_alignment_store(struct bus_type *bus,
4751					const char *buf, size_t count)
4752{
4753	return pci_set_resource_alignment_param(buf, count);
4754}
4755
4756BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
4757					pci_resource_alignment_store);
4758
4759static int __init pci_resource_alignment_sysfs_init(void)
4760{
4761	return bus_create_file(&pci_bus_type,
4762					&bus_attr_resource_alignment);
4763}
4764late_initcall(pci_resource_alignment_sysfs_init);
4765
4766static void pci_no_domains(void)
4767{
4768#ifdef CONFIG_PCI_DOMAINS
4769	pci_domains_supported = 0;
4770#endif
4771}
4772
4773#ifdef CONFIG_PCI_DOMAINS
4774static atomic_t __domain_nr = ATOMIC_INIT(-1);
4775
4776int pci_get_new_domain_nr(void)
4777{
4778	return atomic_inc_return(&__domain_nr);
4779}
4780
4781#ifdef CONFIG_PCI_DOMAINS_GENERIC
4782void pci_bus_assign_domain_nr(struct pci_bus *bus, struct device *parent)
4783{
4784	static int use_dt_domains = -1;
4785	int domain = -1;
4786
4787	if (parent)
4788		domain = of_get_pci_domain_nr(parent->of_node);
4789	/*
4790	 * Check DT domain and use_dt_domains values.
4791	 *
4792	 * If DT domain property is valid (domain >= 0) and
4793	 * use_dt_domains != 0, the DT assignment is valid since this means
4794	 * we have not previously allocated a domain number by using
4795	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
4796	 * 1, to indicate that we have just assigned a domain number from
4797	 * DT.
4798	 *
4799	 * If DT domain property value is not valid (ie domain < 0), and we
4800	 * have not previously assigned a domain number from DT
4801	 * (use_dt_domains != 1) we should assign a domain number by
4802	 * using the:
4803	 *
4804	 * pci_get_new_domain_nr()
4805	 *
4806	 * API and update the use_dt_domains value to keep track of method we
4807	 * are using to assign domain numbers (use_dt_domains = 0).
4808	 *
4809	 * All other combinations imply we have a platform that is trying
4810	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
4811	 * which is a recipe for domain mishandling and it is prevented by
4812	 * invalidating the domain value (domain = -1) and printing a
4813	 * corresponding error.
4814	 */
4815	if (domain >= 0 && use_dt_domains) {
4816		use_dt_domains = 1;
4817	} else if (domain < 0 && use_dt_domains != 1) {
4818		use_dt_domains = 0;
4819		domain = pci_get_new_domain_nr();
4820	} else {
4821		dev_err(parent, "Node %s has inconsistent \"linux,pci-domain\" property in DT\n",
4822			parent->of_node->full_name);
4823		domain = -1;
4824	}
4825
4826	bus->domain_nr = domain;
4827}
4828#endif
4829#endif
4830
4831/**
4832 * pci_ext_cfg_avail - can we access extended PCI config space?
4833 *
4834 * Returns 1 if we can access PCI extended config space (offsets
4835 * greater than 0xff). This is the default implementation. Architecture
4836 * implementations can override this.
4837 */
4838int __weak pci_ext_cfg_avail(void)
4839{
4840	return 1;
4841}
4842
4843void __weak pci_fixup_cardbus(struct pci_bus *bus)
4844{
4845}
4846EXPORT_SYMBOL(pci_fixup_cardbus);
4847
4848static int __init pci_setup(char *str)
4849{
4850	while (str) {
4851		char *k = strchr(str, ',');
4852		if (k)
4853			*k++ = 0;
4854		if (*str && (str = pcibios_setup(str)) && *str) {
4855			if (!strcmp(str, "nomsi")) {
4856				pci_no_msi();
4857			} else if (!strcmp(str, "noaer")) {
4858				pci_no_aer();
4859			} else if (!strncmp(str, "realloc=", 8)) {
4860				pci_realloc_get_opt(str + 8);
4861			} else if (!strncmp(str, "realloc", 7)) {
4862				pci_realloc_get_opt("on");
4863			} else if (!strcmp(str, "nodomains")) {
4864				pci_no_domains();
4865			} else if (!strncmp(str, "noari", 5)) {
4866				pcie_ari_disabled = true;
4867			} else if (!strncmp(str, "cbiosize=", 9)) {
4868				pci_cardbus_io_size = memparse(str + 9, &str);
4869			} else if (!strncmp(str, "cbmemsize=", 10)) {
4870				pci_cardbus_mem_size = memparse(str + 10, &str);
4871			} else if (!strncmp(str, "resource_alignment=", 19)) {
4872				pci_set_resource_alignment_param(str + 19,
4873							strlen(str + 19));
4874			} else if (!strncmp(str, "ecrc=", 5)) {
4875				pcie_ecrc_get_policy(str + 5);
4876			} else if (!strncmp(str, "hpiosize=", 9)) {
4877				pci_hotplug_io_size = memparse(str + 9, &str);
4878			} else if (!strncmp(str, "hpmemsize=", 10)) {
4879				pci_hotplug_mem_size = memparse(str + 10, &str);
4880			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
4881				pcie_bus_config = PCIE_BUS_TUNE_OFF;
4882			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
4883				pcie_bus_config = PCIE_BUS_SAFE;
4884			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
4885				pcie_bus_config = PCIE_BUS_PERFORMANCE;
4886			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
4887				pcie_bus_config = PCIE_BUS_PEER2PEER;
4888			} else if (!strncmp(str, "pcie_scan_all", 13)) {
4889				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
4890			} else {
4891				printk(KERN_ERR "PCI: Unknown option `%s'\n",
4892						str);
4893			}
4894		}
4895		str = k;
4896	}
4897	return 0;
4898}
4899early_param("pci", pci_setup);
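
/*
 * Example (hypothetical values): booting with
 * "pci=nomsi,cbmemsize=64M,hpiosize=4K" disables MSI and overrides the
 * CardBus memory and hotplug I/O window sizes via the handlers above.
 */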