   1/*
   2 *	PCI Bus Services, see include/linux/pci.h for further explanation.
   3 *
   4 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   5 *	David Mosberger-Tang
   6 *
   7 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   8 */
   9
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
  12#include <linux/init.h>
  13#include <linux/pci.h>
  14#include <linux/pm.h>
  15#include <linux/slab.h>
  16#include <linux/module.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/log2.h>
  20#include <linux/pci-aspm.h>
  21#include <linux/pm_wakeup.h>
  22#include <linux/interrupt.h>
  23#include <linux/device.h>
  24#include <linux/pm_runtime.h>
  25#include <asm/setup.h>
  26#include "pci.h"
  27
  28const char *pci_power_names[] = {
  29	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  30};
  31EXPORT_SYMBOL_GPL(pci_power_names);
  32
  33int isa_dma_bridge_buggy;
  34EXPORT_SYMBOL(isa_dma_bridge_buggy);
  35
  36int pci_pci_problems;
  37EXPORT_SYMBOL(pci_pci_problems);
  38
  39unsigned int pci_pm_d3_delay;
  40
  41static void pci_pme_list_scan(struct work_struct *work);
  42
  43static LIST_HEAD(pci_pme_list);
  44static DEFINE_MUTEX(pci_pme_list_mutex);
  45static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  46
  47struct pci_pme_device {
  48	struct list_head list;
  49	struct pci_dev *dev;
  50};
  51
   52#define PME_TIMEOUT 1000 /* How long between PME checks, in milliseconds */
  53
  54static void pci_dev_d3_sleep(struct pci_dev *dev)
  55{
  56	unsigned int delay = dev->d3_delay;
  57
  58	if (delay < pci_pm_d3_delay)
  59		delay = pci_pm_d3_delay;
  60
  61	msleep(delay);
  62}
  63
  64#ifdef CONFIG_PCI_DOMAINS
  65int pci_domains_supported = 1;
  66#endif
  67
  68#define DEFAULT_CARDBUS_IO_SIZE		(256)
  69#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  70/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  71unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  72unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  73
  74#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  75#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
  76/* pci=hpmemsize=nnM,hpiosize=nn can override this */
  77unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
  79
  80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
  81
   82/*
   83 * The default CLS is used if the arch didn't set CLS explicitly and not
   84 * all PCI devices agree on the same value.  The arch can override either
   85 * the default or the actual value as it sees fit.  Don't forget this is
   86 * measured in 32-bit words, not bytes.
   87 */
  88u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
  89u8 pci_cache_line_size;
  90
  91/**
  92 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  93 * @bus: pointer to PCI bus structure to search
  94 *
  95 * Given a PCI bus, returns the highest PCI bus number present in the set
  96 * including the given PCI bus and its list of child PCI buses.
  97 */
  98unsigned char pci_bus_max_busnr(struct pci_bus* bus)
  99{
 100	struct list_head *tmp;
 101	unsigned char max, n;
 102
 103	max = bus->subordinate;
 104	list_for_each(tmp, &bus->children) {
 105		n = pci_bus_max_busnr(pci_bus_b(tmp));
  106		if (n > max)
 107			max = n;
 108	}
 109	return max;
 110}
 111EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 112
 113#ifdef CONFIG_HAS_IOMEM
 114void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 115{
 116	/*
 117	 * Make sure the BAR is actually a memory resource, not an IO resource
 118	 */
 119	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 120		WARN_ON(1);
 121		return NULL;
 122	}
 123	return ioremap_nocache(pci_resource_start(pdev, bar),
 124				     pci_resource_len(pdev, bar));
 125}
 126EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 127#endif
 128
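/*
 * Example usage (hypothetical "foo" driver, assuming the device has
 * already been enabled): map the first memory BAR; the caller unmaps it
 * with iounmap() on teardown.
 */
#if 0
static void __iomem *foo_map_registers(struct pci_dev *pdev)
{
	void __iomem *regs;

	regs = pci_ioremap_bar(pdev, 0);	/* BAR 0 must be a MEM BAR */
	if (!regs)
		return NULL;
	/* ... access device registers with readl()/writel() ... */
	return regs;
}
#endif  /* example */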
 129#if 0
 130/**
 131 * pci_max_busnr - returns maximum PCI bus number
 132 *
 133 * Returns the highest PCI bus number present in the system global list of
 134 * PCI buses.
 135 */
 136unsigned char __devinit
 137pci_max_busnr(void)
 138{
 139	struct pci_bus *bus = NULL;
 140	unsigned char max, n;
 141
 142	max = 0;
 143	while ((bus = pci_find_next_bus(bus)) != NULL) {
 144		n = pci_bus_max_busnr(bus);
  145		if (n > max)
 146			max = n;
 147	}
 148	return max;
 149}
 150
 151#endif  /*  0  */
 152
 153#define PCI_FIND_CAP_TTL	48
 154
 155static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 156				   u8 pos, int cap, int *ttl)
 157{
 158	u8 id;
 159
 160	while ((*ttl)--) {
 161		pci_bus_read_config_byte(bus, devfn, pos, &pos);
 162		if (pos < 0x40)
 163			break;
 164		pos &= ~3;
 165		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
 166					 &id);
 167		if (id == 0xff)
 168			break;
 169		if (id == cap)
 170			return pos;
 171		pos += PCI_CAP_LIST_NEXT;
 172	}
 173	return 0;
 174}
 175
 176static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 177			       u8 pos, int cap)
 178{
 179	int ttl = PCI_FIND_CAP_TTL;
 180
 181	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 182}
 183
 184int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 185{
 186	return __pci_find_next_cap(dev->bus, dev->devfn,
 187				   pos + PCI_CAP_LIST_NEXT, cap);
 188}
 189EXPORT_SYMBOL_GPL(pci_find_next_capability);
 190
 191static int __pci_bus_find_cap_start(struct pci_bus *bus,
 192				    unsigned int devfn, u8 hdr_type)
 193{
 194	u16 status;
 195
 196	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 197	if (!(status & PCI_STATUS_CAP_LIST))
 198		return 0;
 199
 200	switch (hdr_type) {
 201	case PCI_HEADER_TYPE_NORMAL:
 202	case PCI_HEADER_TYPE_BRIDGE:
 203		return PCI_CAPABILITY_LIST;
 204	case PCI_HEADER_TYPE_CARDBUS:
 205		return PCI_CB_CAPABILITY_LIST;
 206	default:
 207		return 0;
 208	}
 209
 210	return 0;
 211}
 212
 213/**
 214 * pci_find_capability - query for devices' capabilities 
 215 * @dev: PCI device to query
 216 * @cap: capability code
 217 *
 218 * Tell if a device supports a given PCI capability.
 219 * Returns the address of the requested capability structure within the
 220 * device's PCI configuration space or 0 in case the device does not
 221 * support it.  Possible values for @cap:
 222 *
 223 *  %PCI_CAP_ID_PM           Power Management 
 224 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port 
 225 *  %PCI_CAP_ID_VPD          Vital Product Data 
 226 *  %PCI_CAP_ID_SLOTID       Slot Identification 
  227 *  %PCI_CAP_ID_MSI          Message Signaled Interrupts
 228 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap 
 229 *  %PCI_CAP_ID_PCIX         PCI-X
 230 *  %PCI_CAP_ID_EXP          PCI Express
 231 */
 232int pci_find_capability(struct pci_dev *dev, int cap)
 233{
 234	int pos;
 235
 236	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 237	if (pos)
 238		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 239
 240	return pos;
 241}
 242
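/*
 * Example usage (hypothetical helper): locate the PM capability and read
 * its PMC register, much as pci_pm_init() below does for real.
 */
#if 0
static void foo_show_pm_cap(struct pci_dev *dev)
{
	int pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	u16 pmc;

	if (!pm)
		return;		/* device has no PM capability */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
	dev_info(&dev->dev, "PM capability at %#x, PMC %#06x\n", pm, pmc);
}
#endif  /* example */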
 243/**
 244 * pci_bus_find_capability - query for devices' capabilities 
 245 * @bus:   the PCI bus to query
 246 * @devfn: PCI device to query
 247 * @cap:   capability code
 248 *
 249 * Like pci_find_capability() but works for pci devices that do not have a
 250 * pci_dev structure set up yet. 
 251 *
 252 * Returns the address of the requested capability structure within the
 253 * device's PCI configuration space or 0 in case the device does not
 254 * support it.
 255 */
 256int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 257{
 258	int pos;
 259	u8 hdr_type;
 260
 261	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 262
 263	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 264	if (pos)
 265		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 266
 267	return pos;
 268}
 269
 270/**
 271 * pci_find_ext_capability - Find an extended capability
 272 * @dev: PCI device to query
 273 * @cap: capability code
 274 *
 275 * Returns the address of the requested extended capability structure
 276 * within the device's PCI configuration space or 0 if the device does
 277 * not support it.  Possible values for @cap:
 278 *
 279 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 280 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 281 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 282 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 283 */
 284int pci_find_ext_capability(struct pci_dev *dev, int cap)
 285{
 286	u32 header;
 287	int ttl;
 288	int pos = PCI_CFG_SPACE_SIZE;
 289
 290	/* minimum 8 bytes per capability */
 291	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 292
 293	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 294		return 0;
 295
 296	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 297		return 0;
 298
 299	/*
 300	 * If we have no capabilities, this is indicated by cap ID,
 301	 * cap version and next pointer all being 0.
 302	 */
 303	if (header == 0)
 304		return 0;
 305
 306	while (ttl-- > 0) {
 307		if (PCI_EXT_CAP_ID(header) == cap)
 308			return pos;
 309
 310		pos = PCI_EXT_CAP_NEXT(header);
 311		if (pos < PCI_CFG_SPACE_SIZE)
 312			break;
 313
 314		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 315			break;
 316	}
 317
 318	return 0;
 319}
 320EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 321
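/*
 * Example usage (hypothetical helper): a nonzero return means the
 * extended capability exists, here Advanced Error Reporting.
 */
#if 0
static bool foo_has_aer(struct pci_dev *dev)
{
	return pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR) != 0;
}
#endif  /* example */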
 322/**
 323 * pci_bus_find_ext_capability - find an extended capability
 324 * @bus:   the PCI bus to query
 325 * @devfn: PCI device to query
 326 * @cap:   capability code
 327 *
 328 * Like pci_find_ext_capability() but works for pci devices that do not have a
 329 * pci_dev structure set up yet.
 330 *
 331 * Returns the address of the requested capability structure within the
 332 * device's PCI configuration space or 0 in case the device does not
 333 * support it.
 334 */
 335int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
 336				int cap)
 337{
 338	u32 header;
 339	int ttl;
 340	int pos = PCI_CFG_SPACE_SIZE;
 341
 342	/* minimum 8 bytes per capability */
 343	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 344
 345	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 346		return 0;
 347	if (header == 0xffffffff || header == 0)
 348		return 0;
 349
 350	while (ttl-- > 0) {
 351		if (PCI_EXT_CAP_ID(header) == cap)
 352			return pos;
 353
 354		pos = PCI_EXT_CAP_NEXT(header);
 355		if (pos < PCI_CFG_SPACE_SIZE)
 356			break;
 357
 358		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 359			break;
 360	}
 361
 362	return 0;
 363}
 364
 365static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 366{
 367	int rc, ttl = PCI_FIND_CAP_TTL;
 368	u8 cap, mask;
 369
 370	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 371		mask = HT_3BIT_CAP_MASK;
 372	else
 373		mask = HT_5BIT_CAP_MASK;
 374
 375	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 376				      PCI_CAP_ID_HT, &ttl);
 377	while (pos) {
 378		rc = pci_read_config_byte(dev, pos + 3, &cap);
 379		if (rc != PCIBIOS_SUCCESSFUL)
 380			return 0;
 381
 382		if ((cap & mask) == ht_cap)
 383			return pos;
 384
 385		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 386					      pos + PCI_CAP_LIST_NEXT,
 387					      PCI_CAP_ID_HT, &ttl);
 388	}
 389
 390	return 0;
 391}
 392/**
  393 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
  394 * @dev: PCI device to query
  395 * @pos: Position from which to continue searching
  396 * @ht_cap: HyperTransport capability code
 397 *
 398 * To be used in conjunction with pci_find_ht_capability() to search for
 399 * all capabilities matching @ht_cap. @pos should always be a value returned
 400 * from pci_find_ht_capability().
 401 *
 402 * NB. To be 100% safe against broken PCI devices, the caller should take
 403 * steps to avoid an infinite loop.
 404 */
 405int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 406{
 407	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 408}
 409EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 410
 411/**
  412 * pci_find_ht_capability - query a device's HyperTransport capabilities
  413 * @dev: PCI device to query
  414 * @ht_cap: HyperTransport capability code
 415 *
  416 * Tell if a device supports a given HyperTransport capability.
  417 * Returns an address within the device's PCI configuration space
  418 * or 0 in case the device does not support the requested capability.
  419 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
  420 * which has a HyperTransport capability matching @ht_cap.
 421 */
 422int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 423{
 424	int pos;
 425
 426	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 427	if (pos)
 428		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 429
 430	return pos;
 431}
 432EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 433
 434/**
 435 * pci_find_parent_resource - return resource region of parent bus of given region
 436 * @dev: PCI device structure contains resources to be searched
 437 * @res: child resource record for which parent is sought
 438 *
  439 *  For the given resource region of the given device, return the
  440 *  resource region of the parent bus that contains the region, or
  441 *  from which it should be allocated.
 442 */
 443struct resource *
 444pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 445{
 446	const struct pci_bus *bus = dev->bus;
 447	int i;
 448	struct resource *best = NULL, *r;
 449
 450	pci_bus_for_each_resource(bus, r, i) {
 451		if (!r)
 452			continue;
 453		if (res->start && !(res->start >= r->start && res->end <= r->end))
 454			continue;	/* Not contained */
 455		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
 456			continue;	/* Wrong type */
 457		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
 458			return r;	/* Exact match */
 459		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
 460		if (r->flags & IORESOURCE_PREFETCH)
 461			continue;
 462		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
 463		if (!best)
 464			best = r;
 465	}
 466	return best;
 467}
 468
 469/**
  470 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 471 * @dev: PCI device to have its BARs restored
 472 *
 473 * Restore the BAR values for a given device, so as to make it
 474 * accessible by its driver.
 475 */
 476static void
 477pci_restore_bars(struct pci_dev *dev)
 478{
 479	int i;
 480
 481	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 482		pci_update_resource(dev, i);
 483}
 484
 485static struct pci_platform_pm_ops *pci_platform_pm;
 486
 487int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
 488{
 489	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
 490	    || !ops->sleep_wake || !ops->can_wakeup)
 491		return -EINVAL;
 492	pci_platform_pm = ops;
 493	return 0;
 494}
 495
 496static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 497{
 498	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 499}
 500
 501static inline int platform_pci_set_power_state(struct pci_dev *dev,
 502                                                pci_power_t t)
 503{
 504	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 505}
 506
 507static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 508{
 509	return pci_platform_pm ?
 510			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 511}
 512
 513static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
 514{
 515	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
 516}
 517
 518static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 519{
 520	return pci_platform_pm ?
 521			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 522}
 523
 524static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
 525{
 526	return pci_platform_pm ?
 527			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 528}
 529
 530/**
 531 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 532 *                           given PCI device
 533 * @dev: PCI device to handle.
 534 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 535 *
 536 * RETURN VALUE:
 537 * -EINVAL if the requested state is invalid.
 538 * -EIO if device does not support PCI PM or its PM capabilities register has a
 539 * wrong version, or device doesn't support the requested state.
 540 * 0 if device already is in the requested state.
 541 * 0 if device's power state has been successfully changed.
 542 */
 543static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 544{
 545	u16 pmcsr;
 546	bool need_restore = false;
 547
 548	/* Check if we're already there */
 549	if (dev->current_state == state)
 550		return 0;
 551
 552	if (!dev->pm_cap)
 553		return -EIO;
 554
 555	if (state < PCI_D0 || state > PCI_D3hot)
 556		return -EINVAL;
 557
 558	/* Validate current state:
  559	 * We can enter D0 from any state, but a transition to any other
  560	 * state must not target a shallower state than the current one
 561	 */
 562	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 563	    && dev->current_state > state) {
 564		dev_err(&dev->dev, "invalid power transition "
 565			"(from state %d to %d)\n", dev->current_state, state);
 566		return -EINVAL;
 567	}
 568
 569	/* check if this device supports the desired state */
 570	if ((state == PCI_D1 && !dev->d1_support)
 571	   || (state == PCI_D2 && !dev->d2_support))
 572		return -EIO;
 573
 574	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 575
 576	/* If we're (effectively) in D3, force entire word to 0.
 577	 * This doesn't affect PME_Status, disables PME_En, and
 578	 * sets PowerState to 0.
 579	 */
 580	switch (dev->current_state) {
 581	case PCI_D0:
 582	case PCI_D1:
 583	case PCI_D2:
 584		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 585		pmcsr |= state;
 586		break;
 587	case PCI_D3hot:
 588	case PCI_D3cold:
 589	case PCI_UNKNOWN: /* Boot-up */
 590		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 591		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 592			need_restore = true;
 593		/* Fall-through: force to D0 */
 594	default:
 595		pmcsr = 0;
 596		break;
 597	}
 598
 599	/* enter specified state */
 600	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 601
 602	/* Mandatory power management transition delays */
 603	/* see PCI PM 1.1 5.6.1 table 18 */
 604	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 605		pci_dev_d3_sleep(dev);
 606	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 607		udelay(PCI_PM_D2_DELAY);
 608
 609	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 610	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 611	if (dev->current_state != state && printk_ratelimit())
 612		dev_info(&dev->dev, "Refused to change power state, "
 613			"currently in D%d\n", dev->current_state);
 614
 615	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 616	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 617	 * from D3hot to D0 _may_ perform an internal reset, thereby
 618	 * going to "D0 Uninitialized" rather than "D0 Initialized".
 619	 * For example, at least some versions of the 3c905B and the
 620	 * 3c556B exhibit this behaviour.
 621	 *
 622	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
 623	 * devices in a D3hot state at boot.  Consequently, we need to
 624	 * restore at least the BARs so that the device will be
 625	 * accessible to its driver.
 626	 */
 627	if (need_restore)
 628		pci_restore_bars(dev);
 629
 630	if (dev->bus->self)
 631		pcie_aspm_pm_state_change(dev->bus->self);
 632
 633	return 0;
 634}
 635
 636/**
 637 * pci_update_current_state - Read PCI power state of given device from its
 638 *                            PCI PM registers and cache it
 639 * @dev: PCI device to handle.
 640 * @state: State to cache in case the device doesn't have the PM capability
 641 */
 642void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 643{
 644	if (dev->pm_cap) {
 645		u16 pmcsr;
 646
 647		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 648		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 649	} else {
 650		dev->current_state = state;
 651	}
 652}
 653
 654/**
 655 * pci_platform_power_transition - Use platform to change device power state
 656 * @dev: PCI device to handle.
 657 * @state: State to put the device into.
 658 */
 659static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 660{
 661	int error;
 662
 663	if (platform_pci_power_manageable(dev)) {
 664		error = platform_pci_set_power_state(dev, state);
 665		if (!error)
 666			pci_update_current_state(dev, state);
 667	} else {
 668		error = -ENODEV;
 669		/* Fall back to PCI_D0 if native PM is not supported */
 670		if (!dev->pm_cap)
 671			dev->current_state = PCI_D0;
 672	}
 673
 674	return error;
 675}
 676
 677/**
 678 * __pci_start_power_transition - Start power transition of a PCI device
 679 * @dev: PCI device to handle.
 680 * @state: State to put the device into.
 681 */
 682static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 683{
 684	if (state == PCI_D0)
 685		pci_platform_power_transition(dev, PCI_D0);
 686}
 687
 688/**
 689 * __pci_complete_power_transition - Complete power transition of a PCI device
 690 * @dev: PCI device to handle.
 691 * @state: State to put the device into.
 692 *
 693 * This function should not be called directly by device drivers.
 694 */
 695int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 696{
 697	return state >= PCI_D0 ?
 698			pci_platform_power_transition(dev, state) : -EINVAL;
 699}
 700EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 701
 702/**
 703 * pci_set_power_state - Set the power state of a PCI device
 704 * @dev: PCI device to handle.
 705 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 706 *
 707 * Transition a device to a new power state, using the platform firmware and/or
 708 * the device's PCI PM registers.
 709 *
 710 * RETURN VALUE:
 711 * -EINVAL if the requested state is invalid.
 712 * -EIO if device does not support PCI PM or its PM capabilities register has a
 713 * wrong version, or device doesn't support the requested state.
 714 * 0 if device already is in the requested state.
 715 * 0 if device's power state has been successfully changed.
 716 */
 717int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 718{
 719	int error;
 720
 721	/* bound the state we're entering */
 722	if (state > PCI_D3hot)
 723		state = PCI_D3hot;
 724	else if (state < PCI_D0)
 725		state = PCI_D0;
 726	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
 727		/*
 728		 * If the device or the parent bridge do not support PCI PM,
 729		 * ignore the request if we're doing anything other than putting
 730		 * it into D0 (which would only happen on boot).
 731		 */
 732		return 0;
 733
 734	__pci_start_power_transition(dev, state);
 735
  736	/* Devices quirked with PCI_DEV_FLAGS_NO_D3 must not be put
  737	   into D3, so honor that here */
 738	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 739		return 0;
 740
 741	error = pci_raw_set_power_state(dev, state);
 742
 743	if (!__pci_complete_power_transition(dev, state))
 744		error = 0;
 745	/*
 746	 * When aspm_policy is "powersave" this call ensures
 747	 * that ASPM is configured.
 748	 */
 749	if (!error && dev->bus->self)
 750		pcie_aspm_powersave_config_link(dev->bus->self);
 751
 752	return error;
 753}
 754
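/*
 * Example usage (hypothetical legacy suspend/resume pair): save config
 * space, drop to D3hot, and reverse the steps on resume.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	/* ... quiesce the hardware first ... */
	pci_save_state(pdev);
	return pci_set_power_state(pdev, PCI_D3hot);
}

static int foo_resume(struct pci_dev *pdev)
{
	int err = pci_set_power_state(pdev, PCI_D0);

	if (err)
		return err;
	pci_restore_state(pdev);
	return 0;
}
#endif  /* example */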
 755/**
 756 * pci_choose_state - Choose the power state of a PCI device
 757 * @dev: PCI device to be suspended
 758 * @state: target sleep state for the whole system. This is the value
  759 *	that is passed to the driver's suspend() function.
 760 *
 761 * Returns PCI power state suitable for given device and given system
 762 * message.
 763 */
 764
 765pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 766{
 767	pci_power_t ret;
 768
 769	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
 770		return PCI_D0;
 771
 772	ret = platform_pci_choose_state(dev);
 773	if (ret != PCI_POWER_ERROR)
 774		return ret;
 775
 776	switch (state.event) {
 777	case PM_EVENT_ON:
 778		return PCI_D0;
 779	case PM_EVENT_FREEZE:
 780	case PM_EVENT_PRETHAW:
 781		/* REVISIT both freeze and pre-thaw "should" use D0 */
 782	case PM_EVENT_SUSPEND:
 783	case PM_EVENT_HIBERNATE:
 784		return PCI_D3hot;
 785	default:
 786		dev_info(&dev->dev, "unrecognized suspend event %d\n",
 787			 state.event);
 788		BUG();
 789	}
 790	return PCI_D0;
 791}
 792
 793EXPORT_SYMBOL(pci_choose_state);
 794
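/*
 * Example usage (hypothetical): instead of hard-coding D3hot as in the
 * sketch above, let pci_choose_state() pick a state appropriate for the
 * system event.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	pci_save_state(pdev);
	return pci_set_power_state(pdev, pci_choose_state(pdev, msg));
}
#endif  /* example */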
 795#define PCI_EXP_SAVE_REGS	7
 796
 797#define pcie_cap_has_devctl(type, flags)	1
 798#define pcie_cap_has_lnkctl(type, flags)		\
 799		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 800		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 801		  type == PCI_EXP_TYPE_ENDPOINT ||	\
 802		  type == PCI_EXP_TYPE_LEG_END))
 803#define pcie_cap_has_sltctl(type, flags)		\
 804		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 805		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
 806		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
 807		   (flags & PCI_EXP_FLAGS_SLOT))))
 808#define pcie_cap_has_rtctl(type, flags)			\
 809		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 810		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 811		  type == PCI_EXP_TYPE_RC_EC))
 812#define pcie_cap_has_devctl2(type, flags)		\
 813		((flags & PCI_EXP_FLAGS_VERS) > 1)
 814#define pcie_cap_has_lnkctl2(type, flags)		\
 815		((flags & PCI_EXP_FLAGS_VERS) > 1)
 816#define pcie_cap_has_sltctl2(type, flags)		\
 817		((flags & PCI_EXP_FLAGS_VERS) > 1)
 818
 819static int pci_save_pcie_state(struct pci_dev *dev)
 820{
 821	int pos, i = 0;
 822	struct pci_cap_saved_state *save_state;
 823	u16 *cap;
 824	u16 flags;
 825
 826	pos = pci_pcie_cap(dev);
 827	if (!pos)
 828		return 0;
 829
 830	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 831	if (!save_state) {
 832		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 833		return -ENOMEM;
 834	}
 835	cap = (u16 *)&save_state->cap.data[0];
 836
 837	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 838
 839	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 840		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
 841	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 842		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
 843	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 844		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
 845	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 846		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
 847	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 848		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
 849	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 850		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
 851	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 852		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
 853
 854	return 0;
 855}
 856
 857static void pci_restore_pcie_state(struct pci_dev *dev)
 858{
 859	int i = 0, pos;
 860	struct pci_cap_saved_state *save_state;
 861	u16 *cap;
 862	u16 flags;
 863
 864	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 865	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 866	if (!save_state || pos <= 0)
 867		return;
 868	cap = (u16 *)&save_state->cap.data[0];
 869
 870	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 871
 872	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 873		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
 874	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 875		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
 876	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 877		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
 878	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 879		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
 880	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 881		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
 882	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 883		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
 884	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 885		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
 886}
 887
 888
 889static int pci_save_pcix_state(struct pci_dev *dev)
 890{
 891	int pos;
 892	struct pci_cap_saved_state *save_state;
 893
 894	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 895	if (pos <= 0)
 896		return 0;
 897
 898	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 899	if (!save_state) {
 900		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 901		return -ENOMEM;
 902	}
 903
 904	pci_read_config_word(dev, pos + PCI_X_CMD,
 905			     (u16 *)save_state->cap.data);
 906
 907	return 0;
 908}
 909
 910static void pci_restore_pcix_state(struct pci_dev *dev)
 911{
 912	int i = 0, pos;
 913	struct pci_cap_saved_state *save_state;
 914	u16 *cap;
 915
 916	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 917	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 918	if (!save_state || pos <= 0)
 919		return;
 920	cap = (u16 *)&save_state->cap.data[0];
 921
 922	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
 923}
 924
 925
 926/**
 927 * pci_save_state - save the PCI configuration space of a device before suspending
  928 * @dev: PCI device that we're dealing with
 929 */
 930int
 931pci_save_state(struct pci_dev *dev)
 932{
 933	int i;
 934	/* XXX: 100% dword access ok here? */
 935	for (i = 0; i < 16; i++)
 936		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 937	dev->state_saved = true;
 938	if ((i = pci_save_pcie_state(dev)) != 0)
 939		return i;
 940	if ((i = pci_save_pcix_state(dev)) != 0)
 941		return i;
 942	return 0;
 943}
 944
 945/** 
 946 * pci_restore_state - Restore the saved state of a PCI device
  947 * @dev: PCI device that we're dealing with
 948 */
 949void pci_restore_state(struct pci_dev *dev)
 950{
 951	int i;
 952	u32 val;
 953
 954	if (!dev->state_saved)
 955		return;
 956
 957	/* PCI Express register must be restored first */
 958	pci_restore_pcie_state(dev);
 959
 960	/*
 961	 * The Base Address register should be programmed before the command
 962	 * register(s)
 963	 */
 964	for (i = 15; i >= 0; i--) {
 965		pci_read_config_dword(dev, i * 4, &val);
 966		if (val != dev->saved_config_space[i]) {
 967			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
 968				"space at offset %#x (was %#x, writing %#x)\n",
 969				i, val, (int)dev->saved_config_space[i]);
  970			pci_write_config_dword(dev, i * 4,
 971				dev->saved_config_space[i]);
 972		}
 973	}
 974	pci_restore_pcix_state(dev);
 975	pci_restore_msi_state(dev);
 976	pci_restore_iov_state(dev);
 977
 978	dev->state_saved = false;
 979}
 980
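/*
 * Example usage (hypothetical AER slot_reset handler): after a reset has
 * wiped the device, replay the snapshot taken earlier by pci_save_state().
 */
#if 0
static pci_ers_result_t foo_slot_reset(struct pci_dev *pdev)
{
	if (pci_enable_device(pdev))
		return PCI_ERS_RESULT_DISCONNECT;
	pci_restore_state(pdev);	/* no-op unless state was saved */
	return PCI_ERS_RESULT_RECOVERED;
}
#endif  /* example */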
 981struct pci_saved_state {
 982	u32 config_space[16];
 983	struct pci_cap_saved_data cap[0];
 984};
 985
 986/**
 987 * pci_store_saved_state - Allocate and return an opaque struct containing
 988 *			   the device saved state.
 989 * @dev: PCI device that we're dealing with
 990 *
  991 * Return NULL if there is no saved state or the buffer allocation fails.
 992 */
 993struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
 994{
 995	struct pci_saved_state *state;
 996	struct pci_cap_saved_state *tmp;
 997	struct pci_cap_saved_data *cap;
 998	struct hlist_node *pos;
 999	size_t size;
1000
1001	if (!dev->state_saved)
1002		return NULL;
1003
1004	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1005
1006	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1007		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1008
1009	state = kzalloc(size, GFP_KERNEL);
1010	if (!state)
1011		return NULL;
1012
1013	memcpy(state->config_space, dev->saved_config_space,
1014	       sizeof(state->config_space));
1015
1016	cap = state->cap;
1017	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1018		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1019		memcpy(cap, &tmp->cap, len);
1020		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1021	}
1022	/* Empty cap_save terminates list */
1023
1024	return state;
1025}
1026EXPORT_SYMBOL_GPL(pci_store_saved_state);
1027
1028/**
1029 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1030 * @dev: PCI device that we're dealing with
1031 * @state: Saved state returned from pci_store_saved_state()
1032 */
1033int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1034{
1035	struct pci_cap_saved_data *cap;
1036
1037	dev->state_saved = false;
1038
1039	if (!state)
1040		return 0;
1041
1042	memcpy(dev->saved_config_space, state->config_space,
1043	       sizeof(state->config_space));
1044
1045	cap = state->cap;
1046	while (cap->size) {
1047		struct pci_cap_saved_state *tmp;
1048
1049		tmp = pci_find_saved_cap(dev, cap->cap_nr);
1050		if (!tmp || tmp->cap.size != cap->size)
1051			return -EINVAL;
1052
1053		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1054		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1055		       sizeof(struct pci_cap_saved_data) + cap->size);
1056	}
1057
1058	dev->state_saved = true;
1059	return 0;
1060}
1061EXPORT_SYMBOL_GPL(pci_load_saved_state);
1062
1063/**
1064 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1065 *				   and free the memory allocated for it.
1066 * @dev: PCI device that we're dealing with
1067 * @state: Pointer to saved state returned from pci_store_saved_state()
1068 */
1069int pci_load_and_free_saved_state(struct pci_dev *dev,
1070				  struct pci_saved_state **state)
1071{
1072	int ret = pci_load_saved_state(dev, *state);
1073	kfree(*state);
1074	*state = NULL;
1075	return ret;
1076}
1077EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1078
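/*
 * Example usage (hypothetical): keep a private copy of the saved state
 * across an operation that may clobber dev->state_saved, then reload it.
 */
#if 0
static int foo_reset_with_state(struct pci_dev *pdev)
{
	struct pci_saved_state *state;
	int err;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);	/* opaque copy we own */
	/* ... reset the device here ... */
	err = pci_load_and_free_saved_state(pdev, &state);
	if (!err)
		pci_restore_state(pdev);
	return err;
}
#endif  /* example */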
1079static int do_pci_enable_device(struct pci_dev *dev, int bars)
1080{
1081	int err;
1082
1083	err = pci_set_power_state(dev, PCI_D0);
1084	if (err < 0 && err != -EIO)
1085		return err;
1086	err = pcibios_enable_device(dev, bars);
1087	if (err < 0)
1088		return err;
1089	pci_fixup_device(pci_fixup_enable, dev);
1090
1091	return 0;
1092}
1093
1094/**
1095 * pci_reenable_device - Resume abandoned device
1096 * @dev: PCI device to be resumed
1097 *
1098 *  Note this function is a backend of pci_default_resume and is not supposed
 1099 *  to be called by normal code; write a proper resume handler and use that instead.
1100 */
1101int pci_reenable_device(struct pci_dev *dev)
1102{
1103	if (pci_is_enabled(dev))
1104		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1105	return 0;
1106}
1107
1108static int __pci_enable_device_flags(struct pci_dev *dev,
1109				     resource_size_t flags)
1110{
1111	int err;
1112	int i, bars = 0;
1113
1114	/*
1115	 * Power state could be unknown at this point, either due to a fresh
1116	 * boot or a device removal call.  So get the current power state
1117	 * so that things like MSI message writing will behave as expected
1118	 * (e.g. if the device really is in D0 at enable time).
1119	 */
1120	if (dev->pm_cap) {
1121		u16 pmcsr;
1122		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1123		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1124	}
1125
1126	if (atomic_add_return(1, &dev->enable_cnt) > 1)
1127		return 0;		/* already enabled */
1128
1129	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1130		if (dev->resource[i].flags & flags)
1131			bars |= (1 << i);
1132
1133	err = do_pci_enable_device(dev, bars);
1134	if (err < 0)
1135		atomic_dec(&dev->enable_cnt);
1136	return err;
1137}
1138
1139/**
1140 * pci_enable_device_io - Initialize a device for use with IO space
1141 * @dev: PCI device to be initialized
1142 *
1143 *  Initialize device before it's used by a driver. Ask low-level code
1144 *  to enable I/O resources. Wake up the device if it was suspended.
1145 *  Beware, this function can fail.
1146 */
1147int pci_enable_device_io(struct pci_dev *dev)
1148{
1149	return __pci_enable_device_flags(dev, IORESOURCE_IO);
1150}
1151
1152/**
1153 * pci_enable_device_mem - Initialize a device for use with Memory space
1154 * @dev: PCI device to be initialized
1155 *
1156 *  Initialize device before it's used by a driver. Ask low-level code
1157 *  to enable Memory resources. Wake up the device if it was suspended.
1158 *  Beware, this function can fail.
1159 */
1160int pci_enable_device_mem(struct pci_dev *dev)
1161{
1162	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1163}
1164
1165/**
1166 * pci_enable_device - Initialize device before it's used by a driver.
1167 * @dev: PCI device to be initialized
1168 *
1169 *  Initialize device before it's used by a driver. Ask low-level code
1170 *  to enable I/O and memory. Wake up the device if it was suspended.
1171 *  Beware, this function can fail.
1172 *
1173 *  Note we don't actually enable the device many times if we call
1174 *  this function repeatedly (we just increment the count).
1175 */
1176int pci_enable_device(struct pci_dev *dev)
1177{
1178	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1179}
1180
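/*
 * Example usage (hypothetical probe): the canonical enable sequence is
 * enable, claim regions, then turn on bus mastering.
 */
#if 0
static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;
	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable;
	pci_set_master(pdev);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}
#endif  /* example */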
1181/*
1182 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1183 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1184 * there's no need to track it separately.  pci_devres is initialized
1185 * when a device is enabled using managed PCI device enable interface.
1186 */
1187struct pci_devres {
1188	unsigned int enabled:1;
1189	unsigned int pinned:1;
1190	unsigned int orig_intx:1;
1191	unsigned int restore_intx:1;
1192	u32 region_mask;
1193};
1194
1195static void pcim_release(struct device *gendev, void *res)
1196{
1197	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1198	struct pci_devres *this = res;
1199	int i;
1200
1201	if (dev->msi_enabled)
1202		pci_disable_msi(dev);
1203	if (dev->msix_enabled)
1204		pci_disable_msix(dev);
1205
1206	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1207		if (this->region_mask & (1 << i))
1208			pci_release_region(dev, i);
1209
1210	if (this->restore_intx)
1211		pci_intx(dev, this->orig_intx);
1212
1213	if (this->enabled && !this->pinned)
1214		pci_disable_device(dev);
1215}
1216
 1217static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1218{
1219	struct pci_devres *dr, *new_dr;
1220
1221	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1222	if (dr)
1223		return dr;
1224
1225	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1226	if (!new_dr)
1227		return NULL;
1228	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1229}
1230
 1231static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1232{
1233	if (pci_is_managed(pdev))
1234		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1235	return NULL;
1236}
1237
1238/**
1239 * pcim_enable_device - Managed pci_enable_device()
1240 * @pdev: PCI device to be initialized
1241 *
1242 * Managed pci_enable_device().
1243 */
1244int pcim_enable_device(struct pci_dev *pdev)
1245{
1246	struct pci_devres *dr;
1247	int rc;
1248
1249	dr = get_pci_dr(pdev);
1250	if (unlikely(!dr))
1251		return -ENOMEM;
1252	if (dr->enabled)
1253		return 0;
1254
1255	rc = pci_enable_device(pdev);
1256	if (!rc) {
1257		pdev->is_managed = 1;
1258		dr->enabled = 1;
1259	}
1260	return rc;
1261}
1262
1263/**
1264 * pcim_pin_device - Pin managed PCI device
1265 * @pdev: PCI device to pin
1266 *
1267 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1268 * driver detach.  @pdev must have been enabled with
1269 * pcim_enable_device().
1270 */
1271void pcim_pin_device(struct pci_dev *pdev)
1272{
1273	struct pci_devres *dr;
1274
1275	dr = find_pci_dr(pdev);
1276	WARN_ON(!dr || !dr->enabled);
1277	if (dr)
1278		dr->pinned = 1;
1279}
1280
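/*
 * Example usage (hypothetical probe): with the managed variant, devres
 * disables the device automatically on driver detach, so no explicit
 * pci_disable_device() is needed in the error or remove paths.
 */
#if 0
static int __devinit foo_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	int err = pcim_enable_device(pdev);

	if (err)
		return err;
	/* call pcim_pin_device(pdev) if the device must stay enabled */
	return 0;
}
#endif  /* example */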
1281/**
1282 * pcibios_disable_device - disable arch specific PCI resources for device dev
1283 * @dev: the PCI device to disable
1284 *
1285 * Disables architecture specific PCI resources for the device. This
1286 * is the default implementation. Architecture implementations can
1287 * override this.
1288 */
 1289void __attribute__((weak)) pcibios_disable_device(struct pci_dev *dev) {}
1290
1291static void do_pci_disable_device(struct pci_dev *dev)
1292{
1293	u16 pci_command;
1294
1295	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1296	if (pci_command & PCI_COMMAND_MASTER) {
1297		pci_command &= ~PCI_COMMAND_MASTER;
1298		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1299	}
1300
1301	pcibios_disable_device(dev);
1302}
1303
1304/**
1305 * pci_disable_enabled_device - Disable device without updating enable_cnt
1306 * @dev: PCI device to disable
1307 *
1308 * NOTE: This function is a backend of PCI power management routines and is
 1309 * not supposed to be called by drivers.
1310 */
1311void pci_disable_enabled_device(struct pci_dev *dev)
1312{
1313	if (pci_is_enabled(dev))
1314		do_pci_disable_device(dev);
1315}
1316
1317/**
1318 * pci_disable_device - Disable PCI device after use
1319 * @dev: PCI device to be disabled
1320 *
 1321 * Signal to the system that the PCI device is not in use anymore.
 1322 * This only involves disabling PCI bus-mastering, if active.
1323 *
1324 * Note we don't actually disable the device until all callers of
1325 * pci_enable_device() have called pci_disable_device().
1326 */
1327void
1328pci_disable_device(struct pci_dev *dev)
1329{
1330	struct pci_devres *dr;
1331
1332	dr = find_pci_dr(dev);
1333	if (dr)
1334		dr->enabled = 0;
1335
1336	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1337		return;
1338
1339	do_pci_disable_device(dev);
1340
1341	dev->is_busmaster = 0;
1342}
1343
1344/**
1345 * pcibios_set_pcie_reset_state - set reset state for device dev
 1346 * @dev: the PCIe device to reset
1347 * @state: Reset state to enter into
1348 *
1349 *
1350 * Sets the PCIe reset state for the device. This is the default
1351 * implementation. Architecture implementations can override this.
1352 */
1353int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1354							enum pcie_reset_state state)
1355{
1356	return -EINVAL;
1357}
1358
1359/**
1360 * pci_set_pcie_reset_state - set reset state for device dev
 1361 * @dev: the PCIe device to reset
1362 * @state: Reset state to enter into
1363 *
1364 *
1365 * Sets the PCI reset state for the device.
1366 */
1367int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1368{
1369	return pcibios_set_pcie_reset_state(dev, state);
1370}
1371
1372/**
1373 * pci_check_pme_status - Check if given device has generated PME.
1374 * @dev: Device to check.
1375 *
1376 * Check the PME status of the device and if set, clear it and clear PME enable
1377 * (if set).  Return 'true' if PME status and PME enable were both set or
1378 * 'false' otherwise.
1379 */
1380bool pci_check_pme_status(struct pci_dev *dev)
1381{
1382	int pmcsr_pos;
1383	u16 pmcsr;
1384	bool ret = false;
1385
1386	if (!dev->pm_cap)
1387		return false;
1388
1389	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1390	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1391	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1392		return false;
1393
1394	/* Clear PME status. */
1395	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1396	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1397		/* Disable PME to avoid interrupt flood. */
1398		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1399		ret = true;
1400	}
1401
1402	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1403
1404	return ret;
1405}
1406
1407/**
1408 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1409 * @dev: Device to handle.
1410 * @ign: Ignored.
1411 *
1412 * Check if @dev has generated PME and queue a resume request for it in that
1413 * case.
1414 */
1415static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
1416{
1417	if (pci_check_pme_status(dev)) {
1418		pci_wakeup_event(dev);
1419		pm_request_resume(&dev->dev);
1420	}
1421	return 0;
1422}
1423
1424/**
1425 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1426 * @bus: Top bus of the subtree to walk.
1427 */
1428void pci_pme_wakeup_bus(struct pci_bus *bus)
1429{
1430	if (bus)
1431		pci_walk_bus(bus, pci_pme_wakeup, NULL);
1432}
1433
1434/**
1435 * pci_pme_capable - check the capability of PCI device to generate PME#
1436 * @dev: PCI device to handle.
1437 * @state: PCI state from which device will issue PME#.
1438 */
1439bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1440{
1441	if (!dev->pm_cap)
1442		return false;
1443
1444	return !!(dev->pme_support & (1 << state));
1445}
1446
1447static void pci_pme_list_scan(struct work_struct *work)
1448{
1449	struct pci_pme_device *pme_dev;
1450
1451	mutex_lock(&pci_pme_list_mutex);
1452	if (!list_empty(&pci_pme_list)) {
1453		list_for_each_entry(pme_dev, &pci_pme_list, list)
1454			pci_pme_wakeup(pme_dev->dev, NULL);
1455		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
1456	}
1457	mutex_unlock(&pci_pme_list_mutex);
1458}
1459
1460/**
1461 * pci_external_pme - is a device an external PCI PME source?
1462 * @dev: PCI device to check
 1463 * A conventional-PCI device below the root bus counts as an external PME# source.
1464 */
1465
1466static bool pci_external_pme(struct pci_dev *dev)
1467{
1468	if (pci_is_pcie(dev) || dev->bus->number == 0)
1469		return false;
1470	return true;
1471}
1472
1473/**
1474 * pci_pme_active - enable or disable PCI device's PME# function
1475 * @dev: PCI device to handle.
1476 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1477 *
1478 * The caller must verify that the device is capable of generating PME# before
1479 * calling this function with @enable equal to 'true'.
1480 */
1481void pci_pme_active(struct pci_dev *dev, bool enable)
1482{
1483	u16 pmcsr;
1484
1485	if (!dev->pm_cap)
1486		return;
1487
1488	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1489	/* Clear PME_Status by writing 1 to it and enable PME# */
1490	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1491	if (!enable)
1492		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1493
1494	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1495
1496	/* PCI (as opposed to PCIe) PME requires that the device have
1497	   its PME# line hooked up correctly. Not all hardware vendors
1498	   do this, so the PME never gets delivered and the device
1499	   remains asleep. The easiest way around this is to
1500	   periodically walk the list of suspended devices and check
1501	   whether any have their PME flag set. The assumption is that
1502	   we'll wake up often enough anyway that this won't be a huge
1503	   hit, and the power savings from the devices will still be a
1504	   win. */
1505
1506	if (pci_external_pme(dev)) {
1507		struct pci_pme_device *pme_dev;
1508		if (enable) {
1509			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1510					  GFP_KERNEL);
1511			if (!pme_dev)
1512				goto out;
1513			pme_dev->dev = dev;
1514			mutex_lock(&pci_pme_list_mutex);
1515			list_add(&pme_dev->list, &pci_pme_list);
1516			if (list_is_singular(&pci_pme_list))
1517				schedule_delayed_work(&pci_pme_work,
1518						      msecs_to_jiffies(PME_TIMEOUT));
1519			mutex_unlock(&pci_pme_list_mutex);
1520		} else {
1521			mutex_lock(&pci_pme_list_mutex);
1522			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1523				if (pme_dev->dev == dev) {
1524					list_del(&pme_dev->list);
1525					kfree(pme_dev);
1526					break;
1527				}
1528			}
1529			mutex_unlock(&pci_pme_list_mutex);
1530		}
1531	}
1532
1533out:
1534	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
1535			enable ? "enabled" : "disabled");
1536}
1537
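/*
 * Example usage (hypothetical helper): the capability check is required
 * before asking for PME# generation from a given state.
 */
#if 0
static void foo_arm_pme(struct pci_dev *pdev)
{
	if (pci_pme_capable(pdev, PCI_D3hot))
		pci_pme_active(pdev, true);
}
#endif  /* example */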
1538/**
1539 * __pci_enable_wake - enable PCI device as wakeup event source
1540 * @dev: PCI device affected
1541 * @state: PCI state from which device will issue wakeup events
1542 * @runtime: True if the events are to be generated at run time
1543 * @enable: True to enable event generation; false to disable
1544 *
1545 * This enables the device as a wakeup event source, or disables it.
 1546 * When such events involve platform-specific hooks, those hooks are
1547 * called automatically by this routine.
1548 *
1549 * Devices with legacy power management (no standard PCI PM capabilities)
1550 * always require such platform hooks.
1551 *
1552 * RETURN VALUE:
1553 * 0 is returned on success
1554 * -EINVAL is returned if device is not supposed to wake up the system
1555 * Error code depending on the platform is returned if both the platform and
1556 * the native mechanism fail to enable the generation of wake-up events
1557 */
1558int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1559		      bool runtime, bool enable)
1560{
1561	int ret = 0;
1562
1563	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1564		return -EINVAL;
1565
1566	/* Don't do the same thing twice in a row for one device. */
1567	if (!!enable == !!dev->wakeup_prepared)
1568		return 0;
1569
1570	/*
1571	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1572	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1573	 * enable.  To disable wake-up we call the platform first, for symmetry.
1574	 */
1575
1576	if (enable) {
1577		int error;
1578
1579		if (pci_pme_capable(dev, state))
1580			pci_pme_active(dev, true);
1581		else
1582			ret = 1;
1583		error = runtime ? platform_pci_run_wake(dev, true) :
1584					platform_pci_sleep_wake(dev, true);
1585		if (ret)
1586			ret = error;
1587		if (!ret)
1588			dev->wakeup_prepared = true;
1589	} else {
1590		if (runtime)
1591			platform_pci_run_wake(dev, false);
1592		else
1593			platform_pci_sleep_wake(dev, false);
1594		pci_pme_active(dev, false);
1595		dev->wakeup_prepared = false;
1596	}
1597
1598	return ret;
1599}
1600EXPORT_SYMBOL(__pci_enable_wake);
1601
1602/**
1603 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1604 * @dev: PCI device to prepare
1605 * @enable: True to enable wake-up event generation; false to disable
1606 *
1607 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1608 * and this function allows them to set that up cleanly - pci_enable_wake()
1609 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1610 * ordering constraints.
1611 *
1612 * This function only returns error code if the device is not capable of
1613 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1614 * enable wake-up power for it.
1615 */
1616int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1617{
1618	return pci_pme_capable(dev, PCI_D3cold) ?
1619			pci_enable_wake(dev, PCI_D3cold, enable) :
1620			pci_enable_wake(dev, PCI_D3hot, enable);
1621}
1622
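/*
 * Example usage (hypothetical Wake-on-LAN toggle in a NIC driver):
 * record the policy in the device's PM flags, then arm or disarm wake-up.
 */
#if 0
static int foo_set_wol(struct pci_dev *pdev, bool wol)
{
	device_set_wakeup_enable(&pdev->dev, wol);
	return pci_wake_from_d3(pdev, wol);
}
#endif  /* example */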
1623/**
1624 * pci_target_state - find an appropriate low power state for a given PCI dev
1625 * @dev: PCI device
1626 *
1627 * Use underlying platform code to find a supported low power state for @dev.
1628 * If the platform can't manage @dev, return the deepest state from which it
1629 * can generate wake events, based on any available PME info.
1630 */
1631pci_power_t pci_target_state(struct pci_dev *dev)
1632{
1633	pci_power_t target_state = PCI_D3hot;
1634
1635	if (platform_pci_power_manageable(dev)) {
1636		/*
1637		 * Call the platform to choose the target state of the device
1638		 * and enable wake-up from this state if supported.
1639		 */
1640		pci_power_t state = platform_pci_choose_state(dev);
1641
1642		switch (state) {
1643		case PCI_POWER_ERROR:
1644		case PCI_UNKNOWN:
1645			break;
1646		case PCI_D1:
1647		case PCI_D2:
1648			if (pci_no_d1d2(dev))
1649				break;
1650		default:
1651			target_state = state;
1652		}
1653	} else if (!dev->pm_cap) {
1654		target_state = PCI_D0;
1655	} else if (device_may_wakeup(&dev->dev)) {
1656		/*
1657		 * Find the deepest state from which the device can generate
1658		 * wake-up events, make it the target state and enable device
1659		 * to generate PME#.
1660		 */
1661		if (dev->pme_support) {
1662			while (target_state
1663			      && !(dev->pme_support & (1 << target_state)))
1664				target_state--;
1665		}
1666	}
1667
1668	return target_state;
1669}
1670
1671/**
1672 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1673 * @dev: Device to handle.
1674 *
1675 * Choose the power state appropriate for the device depending on whether
1676 * it can wake up the system and/or is power manageable by the platform
1677 * (PCI_D3hot is the default) and put the device into that state.
1678 */
1679int pci_prepare_to_sleep(struct pci_dev *dev)
1680{
1681	pci_power_t target_state = pci_target_state(dev);
1682	int error;
1683
1684	if (target_state == PCI_POWER_ERROR)
1685		return -EIO;
1686
1687	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1688
1689	error = pci_set_power_state(dev, target_state);
1690
1691	if (error)
1692		pci_enable_wake(dev, target_state, false);
1693
1694	return error;
1695}
1696
1697/**
1698 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1699 * @dev: Device to handle.
1700 *
1701 * Disable device's system wake-up capability and put it into D0.
1702 */
1703int pci_back_from_sleep(struct pci_dev *dev)
1704{
1705	pci_enable_wake(dev, PCI_D0, false);
1706	return pci_set_power_state(dev, PCI_D0);
1707}
1708
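/*
 * Example usage (hypothetical suspend/resume pair): let the PCI core pick
 * the target state and arm wake-up, instead of open-coding both steps.
 */
#if 0
static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
{
	pci_save_state(pdev);
	return pci_prepare_to_sleep(pdev);	/* choose state, arm wake-up */
}

static int foo_resume(struct pci_dev *pdev)
{
	pci_back_from_sleep(pdev);		/* back to D0, wake-up off */
	pci_restore_state(pdev);
	return 0;
}
#endif  /* example */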
1709/**
1710 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1711 * @dev: PCI device being suspended.
1712 *
1713 * Prepare @dev to generate wake-up events at run time and put it into a low
1714 * power state.
1715 */
1716int pci_finish_runtime_suspend(struct pci_dev *dev)
1717{
1718	pci_power_t target_state = pci_target_state(dev);
1719	int error;
1720
1721	if (target_state == PCI_POWER_ERROR)
1722		return -EIO;
1723
1724	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1725
1726	error = pci_set_power_state(dev, target_state);
1727
1728	if (error)
1729		__pci_enable_wake(dev, target_state, true, false);
1730
1731	return error;
1732}
1733
1734/**
1735 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1736 * @dev: Device to check.
1737 *
 1738 * Return true if the device itself is capable of generating wake-up events
1739 * (through the platform or using the native PCIe PME) or if the device supports
1740 * PME and one of its upstream bridges can generate wake-up events.
1741 */
1742bool pci_dev_run_wake(struct pci_dev *dev)
1743{
1744	struct pci_bus *bus = dev->bus;
1745
1746	if (device_run_wake(&dev->dev))
1747		return true;
1748
1749	if (!dev->pme_support)
1750		return false;
1751
1752	while (bus->parent) {
1753		struct pci_dev *bridge = bus->self;
1754
1755		if (device_run_wake(&bridge->dev))
1756			return true;
1757
1758		bus = bus->parent;
1759	}
1760
1761	/* We have reached the root bus. */
1762	if (bus->bridge)
1763		return device_run_wake(bus->bridge);
1764
1765	return false;
1766}
1767EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1768
1769/**
1770 * pci_pm_init - Initialize PM functions of given PCI device
1771 * @dev: PCI device to handle.
1772 */
1773void pci_pm_init(struct pci_dev *dev)
1774{
1775	int pm;
1776	u16 pmc;
1777
1778	pm_runtime_forbid(&dev->dev);
1779	device_enable_async_suspend(&dev->dev);
1780	dev->wakeup_prepared = false;
1781
1782	dev->pm_cap = 0;
1783
1784	/* find PCI PM capability in list */
1785	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1786	if (!pm)
1787		return;
1788	/* Check device's ability to generate PME# */
1789	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1790
1791	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1792		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1793			pmc & PCI_PM_CAP_VER_MASK);
1794		return;
1795	}
1796
1797	dev->pm_cap = pm;
1798	dev->d3_delay = PCI_PM_D3_WAIT;
1799
1800	dev->d1_support = false;
1801	dev->d2_support = false;
1802	if (!pci_no_d1d2(dev)) {
1803		if (pmc & PCI_PM_CAP_D1)
1804			dev->d1_support = true;
1805		if (pmc & PCI_PM_CAP_D2)
1806			dev->d2_support = true;
1807
1808		if (dev->d1_support || dev->d2_support)
1809			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1810				   dev->d1_support ? " D1" : "",
1811				   dev->d2_support ? " D2" : "");
1812	}
1813
1814	pmc &= PCI_PM_CAP_PME_MASK;
1815	if (pmc) {
1816		dev_printk(KERN_DEBUG, &dev->dev,
1817			 "PME# supported from%s%s%s%s%s\n",
1818			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1819			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1820			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1821			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1822			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1823		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1824		/*
1825		 * Make device's PM flags reflect the wake-up capability, but
1826		 * let user space enable it to wake up the system as needed.
1827		 */
1828		device_set_wakeup_capable(&dev->dev, true);
1829		/* Disable the PME# generation functionality */
1830		pci_pme_active(dev, false);
1831	} else {
1832		dev->pme_support = 0;
1833	}
1834}
1835
1836/**
1837 * platform_pci_wakeup_init - init platform wakeup if present
1838 * @dev: PCI device
1839 *
1840 * Some devices don't have PCI PM caps but can still generate wakeup
1841 * events through platform methods (like ACPI events).  If @dev supports
1842 * platform wakeup events, set the device flag to indicate as much.  This
1843 * may be redundant if the device also supports PCI PM caps, but double
1844 * initialization should be safe in that case.
1845 */
1846void platform_pci_wakeup_init(struct pci_dev *dev)
1847{
1848	if (!platform_pci_can_wakeup(dev))
1849		return;
1850
1851	device_set_wakeup_capable(&dev->dev, true);
1852	platform_pci_sleep_wake(dev, false);
1853}
1854
1855/**
1856 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1857 * @dev: the PCI device
1858 * @cap: the capability to allocate the buffer for
1859 * @size: requested size of the buffer
1860 */
1861static int pci_add_cap_save_buffer(
1862	struct pci_dev *dev, char cap, unsigned int size)
1863{
1864	int pos;
1865	struct pci_cap_saved_state *save_state;
1866
1867	pos = pci_find_capability(dev, cap);
1868	if (pos <= 0)
1869		return 0;
1870
1871	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1872	if (!save_state)
1873		return -ENOMEM;
1874
1875	save_state->cap.cap_nr = cap;
1876	save_state->cap.size = size;
1877	pci_add_saved_cap(dev, save_state);
1878
1879	return 0;
1880}
1881
1882/**
1883 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1884 * @dev: the PCI device
1885 */
1886void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1887{
1888	int error;
1889
1890	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1891					PCI_EXP_SAVE_REGS * sizeof(u16));
1892	if (error)
1893		dev_err(&dev->dev,
1894			"unable to preallocate PCI Express save buffer\n");
1895
1896	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1897	if (error)
1898		dev_err(&dev->dev,
1899			"unable to preallocate PCI-X save buffer\n");
1900}
1901
1902/**
1903 * pci_enable_ari - enable ARI forwarding if hardware supports it
1904 * @dev: the PCI device
1905 */
1906void pci_enable_ari(struct pci_dev *dev)
1907{
1908	int pos;
1909	u32 cap;
1910	u16 flags, ctrl;
1911	struct pci_dev *bridge;
1912
1913	if (!pci_is_pcie(dev) || dev->devfn)
1914		return;
1915
1916	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1917	if (!pos)
1918		return;
1919
1920	bridge = dev->bus->self;
1921	if (!bridge || !pci_is_pcie(bridge))
1922		return;
1923
1924	pos = pci_pcie_cap(bridge);
1925	if (!pos)
1926		return;
1927
1928	/* ARI is a PCIe v2 feature */
1929	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
1930	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
1931		return;
1932
1933	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1934	if (!(cap & PCI_EXP_DEVCAP2_ARI))
1935		return;
1936
1937	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1938	ctrl |= PCI_EXP_DEVCTL2_ARI;
1939	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1940
1941	bridge->ari_enabled = 1;
1942}
1943
1944/**
1945 * pci_enable_ido - enable ID-based ordering on a device
1946 * @dev: the PCI device
1947 * @type: which types of IDO to enable
1948 *
1949 * Enable ID-based ordering on @dev.  @type can contain the bits
1950 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
1951 * which types of transactions are allowed to be re-ordered.
1952 */
1953void pci_enable_ido(struct pci_dev *dev, unsigned long type)
1954{
1955	int pos;
1956	u16 ctrl;
1957
1958	pos = pci_pcie_cap(dev);
1959	if (!pos)
1960		return;
1961
1962	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1963	if (type & PCI_EXP_IDO_REQUEST)
1964		ctrl |= PCI_EXP_IDO_REQ_EN;
1965	if (type & PCI_EXP_IDO_COMPLETION)
1966		ctrl |= PCI_EXP_IDO_CMP_EN;
1967	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1968}
1969EXPORT_SYMBOL(pci_enable_ido);
1970
1971/**
1972 * pci_disable_ido - disable ID-based ordering on a device
1973 * @dev: the PCI device
1974 * @type: which types of IDO to disable
1975 */
1976void pci_disable_ido(struct pci_dev *dev, unsigned long type)
1977{
1978	int pos;
1979	u16 ctrl;
1980
1981	if (!pci_is_pcie(dev))
1982		return;
1983
1984	pos = pci_pcie_cap(dev);
1985	if (!pos)
1986		return;
1987
1988	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1989	if (type & PCI_EXP_IDO_REQUEST)
1990		ctrl &= ~PCI_EXP_IDO_REQ_EN;
1991	if (type & PCI_EXP_IDO_COMPLETION)
1992		ctrl &= ~PCI_EXP_IDO_CMP_EN;
1993	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1994}
1995EXPORT_SYMBOL(pci_disable_ido);
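/*
 * A minimal usage sketch (hypothetical caller): enable both IDO types at
 * probe time and disable them again symmetrically on teardown.
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 *	...
 *	pci_disable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */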
1996
1997/**
1998 * pci_enable_obff - enable optimized buffer flush/fill
1999 * @dev: PCI device
2000 * @type: type of signaling to use
2001 *
2002 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2003 * signaling if possible, falling back to message signaling only if
2004 * WAKE# isn't supported.  @type should indicate whether the PCIe link
2005 * should be brought out of L0s or L1 to send the message.  It should be
2006 * either %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_EXP_OBFF_SIGNAL_L0.
2007 *
2008 * If your device can benefit from receiving all messages, even at the
2009 * power cost of bringing the link back up from a low power state, use
2010 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2011 * preferred type).
2012 *
2013 * RETURNS:
2014 * Zero on success, appropriate error number on failure.
2015 */
2016int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2017{
2018	int pos;
2019	u32 cap;
2020	u16 ctrl;
2021	int ret;
2022
2023	if (!pci_is_pcie(dev))
2024		return -ENOTSUPP;
2025
2026	pos = pci_pcie_cap(dev);
2027	if (!pos)
2028		return -ENOTSUPP;
2029
2030	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2031	if (!(cap & PCI_EXP_OBFF_MASK))
2032		return -ENOTSUPP; /* no OBFF support at all */
2033
2034	/* Make sure the topology supports OBFF as well */
2035	if (dev->bus && dev->bus->self) {
2036		ret = pci_enable_obff(dev->bus->self, type);
2037		if (ret)
2038			return ret;
2039	}
2040
2041	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2042	if (cap & PCI_EXP_OBFF_WAKE)
2043		ctrl |= PCI_EXP_OBFF_WAKE_EN;
2044	else {
2045		switch (type) {
2046		case PCI_EXP_OBFF_SIGNAL_L0:
2047			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2048				ctrl |= PCI_EXP_OBFF_MSGA_EN;
2049			break;
2050		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2051			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2052			ctrl |= PCI_EXP_OBFF_MSGB_EN;
2053			break;
2054		default:
2055			WARN(1, "bad OBFF signal type\n");
2056			return -ENOTSUPP;
2057		}
2058	}
2059	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2060
2061	return 0;
2062}
2063EXPORT_SYMBOL(pci_enable_obff);
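/*
 * A minimal usage sketch (hypothetical caller): prefer the cheaper L0
 * signaling and treat -ENOTSUPP as a normal outcome on hardware without
 * OBFF support anywhere in the path.
 *
 *	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
 *		dev_info(&pdev->dev, "OBFF not available\n");
 */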
2064
2065/**
2066 * pci_disable_obff - disable optimized buffer flush/fill
2067 * @dev: PCI device
2068 *
2069 * Disable OBFF on @dev.
2070 */
2071void pci_disable_obff(struct pci_dev *dev)
2072{
2073	int pos;
2074	u16 ctrl;
2075
2076	if (!pci_is_pcie(dev))
2077		return;
2078
2079	pos = pci_pcie_cap(dev);
2080	if (!pos)
2081		return;
2082
2083	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2084	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2085	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2086}
2087EXPORT_SYMBOL(pci_disable_obff);
2088
2089/**
2090 * pci_ltr_supported - check whether a device supports LTR
2091 * @dev: PCI device
2092 *
2093 * RETURNS:
2094 * True if @dev supports latency tolerance reporting, false otherwise.
2095 */
2096bool pci_ltr_supported(struct pci_dev *dev)
2097{
2098	int pos;
2099	u32 cap;
2100
2101	if (!pci_is_pcie(dev))
2102		return false;
2103
2104	pos = pci_pcie_cap(dev);
2105	if (!pos)
2106		return false;
2107
2108	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2109
2110	return cap & PCI_EXP_DEVCAP2_LTR;
2111}
2112EXPORT_SYMBOL(pci_ltr_supported);
2113
2114/**
2115 * pci_enable_ltr - enable latency tolerance reporting
2116 * @dev: PCI device
2117 *
2118 * Enable LTR on @dev if possible, which means enabling it first on
2119 * upstream ports.
2120 *
2121 * RETURNS:
2122 * Zero on success, errno on failure.
2123 */
2124int pci_enable_ltr(struct pci_dev *dev)
2125{
2126	int pos;
2127	u16 ctrl;
2128	int ret;
2129
2130	if (!pci_ltr_supported(dev))
2131		return -ENOTSUPP;
2132
2133	pos = pci_pcie_cap(dev);
2134	if (!pos)
2135		return -ENOTSUPP;
2136
2137	/* Only primary function can enable/disable LTR */
2138	if (PCI_FUNC(dev->devfn) != 0)
2139		return -EINVAL;
2140
2141	/* Enable upstream ports first */
2142	if (dev->bus && dev->bus->self) {
2143		ret = pci_enable_ltr(dev->bus->self);
2144		if (ret)
2145			return ret;
2146	}
2147
2148	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2149	ctrl |= PCI_EXP_LTR_EN;
2150	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2151
2152	return 0;
2153}
2154EXPORT_SYMBOL(pci_enable_ltr);
2155
2156/**
2157 * pci_disable_ltr - disable latency tolerance reporting
2158 * @dev: PCI device
2159 */
2160void pci_disable_ltr(struct pci_dev *dev)
2161{
2162	int pos;
2163	u16 ctrl;
2164
2165	if (!pci_ltr_supported(dev))
2166		return;
2167
2168	pos = pci_pcie_cap(dev);
2169	if (!pos)
2170		return;
2171
2172	/* Only primary function can enable/disable LTR */
2173	if (PCI_FUNC(dev->devfn) != 0)
2174		return;
2175
2176	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2177	ctrl &= ~PCI_EXP_LTR_EN;
2178	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2179}
2180EXPORT_SYMBOL(pci_disable_ltr);
2181
2182static int __pci_ltr_scale(int *val)
2183{
2184	int scale = 0;
2185
2186	while (*val > 1023) {
2187		*val = (*val + 31) / 32;
2188		scale++;
2189	}
2190	return scale;
2191}
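/*
 * Worked example of the scaling above: a requested latency of 3000ns does
 * not fit in the 10-bit value field (max 1023), so one scaling step is
 * applied: (3000 + 31) / 32 = 94 with scale = 1, i.e. the register encodes
 * 94 * 32ns = 3008ns (the +31 rounds up rather than down).
 */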
2192
2193/**
2194 * pci_set_ltr - set LTR latency values
2195 * @dev: PCI device
2196 * @snoop_lat_ns: snoop latency in nanoseconds
2197 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2198 *
2199 * Figure out the scale and set the LTR values accordingly.
2200 */
2201int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2202{
2203	int pos, ret, snoop_scale, nosnoop_scale;
2204	u16 val;
2205
2206	if (!pci_ltr_supported(dev))
2207		return -ENOTSUPP;
2208
2209	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2210	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2211
2212	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2213	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2214		return -EINVAL;
2215
2216	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2217	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2218		return -EINVAL;
2219
2220	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2221	if (!pos)
2222		return -ENOTSUPP;
2223
2224	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2225	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2226	if (ret)
2227		return -EIO;
2228
2229	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2230	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2231	if (ret)
2232		return -EIO;
2233
2234	return 0;
2235}
2236EXPORT_SYMBOL(pci_set_ltr);
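/*
 * A minimal sketch with hypothetical latency values: advertise that the
 * device tolerates about 3us of snoop and 10us of no-snoop latency.
 *
 *	if (!pci_enable_ltr(pdev))
 *		pci_set_ltr(pdev, 3000, 10000);
 */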
2237
2238static int pci_acs_enable;
2239
2240/**
2241 * pci_request_acs - ask for ACS to be enabled if supported
2242 */
2243void pci_request_acs(void)
2244{
2245	pci_acs_enable = 1;
2246}
2247
2248/**
2249 * pci_enable_acs - enable ACS if hardware supports it
2250 * @dev: the PCI device
2251 */
2252void pci_enable_acs(struct pci_dev *dev)
2253{
2254	int pos;
2255	u16 cap;
2256	u16 ctrl;
2257
2258	if (!pci_acs_enable)
2259		return;
2260
2261	if (!pci_is_pcie(dev))
2262		return;
2263
2264	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2265	if (!pos)
2266		return;
2267
2268	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2269	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2270
2271	/* Source Validation */
2272	ctrl |= (cap & PCI_ACS_SV);
2273
2274	/* P2P Request Redirect */
2275	ctrl |= (cap & PCI_ACS_RR);
2276
2277	/* P2P Completion Redirect */
2278	ctrl |= (cap & PCI_ACS_CR);
2279
2280	/* Upstream Forwarding */
2281	ctrl |= (cap & PCI_ACS_UF);
2282
2283	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2284}
2285
2286/**
2287 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2288 * @dev: the PCI device
2289 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2290 *
2291 * Perform INTx swizzling for a device behind one level of bridge.  This is
2292 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2293 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2294 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2295 * the PCI Express Base Specification, Revision 2.1)
2296 */
2297u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2298{
2299	int slot;
2300
2301	if (pci_ari_enabled(dev->bus))
2302		slot = 0;
2303	else
2304		slot = PCI_SLOT(dev->devfn);
2305
2306	return (((pin - 1) + slot) % 4) + 1;
2307}
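/*
 * Worked example: INTB (pin 2) on a device in slot 3 is seen on the
 * upstream side of the bridge as (((2 - 1) + 3) % 4) + 1 = 1, i.e. INTA.
 */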
2308
2309int
2310pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2311{
2312	u8 pin;
2313
2314	pin = dev->pin;
2315	if (!pin)
2316		return -1;
2317
2318	while (!pci_is_root_bus(dev->bus)) {
2319		pin = pci_swizzle_interrupt_pin(dev, pin);
2320		dev = dev->bus->self;
2321	}
2322	*bridge = dev;
2323	return pin;
2324}
2325
2326/**
2327 * pci_common_swizzle - swizzle INTx all the way to root bridge
2328 * @dev: the PCI device
2329 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2330 *
2331 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2332 * bridges all the way up to a PCI root bus.
2333 */
2334u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2335{
2336	u8 pin = *pinp;
2337
2338	while (!pci_is_root_bus(dev->bus)) {
2339		pin = pci_swizzle_interrupt_pin(dev, pin);
2340		dev = dev->bus->self;
2341	}
2342	*pinp = pin;
2343	return PCI_SLOT(dev->devfn);
2344}
2345
2346/**
2347 *	pci_release_region - Release a PCI bar
2348 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2349 *	@bar: BAR to release
2350 *
2351 *	Releases the PCI I/O and memory resources previously reserved by a
2352 *	successful call to pci_request_region.  Call this function only
2353 *	after all use of the PCI regions has ceased.
2354 */
2355void pci_release_region(struct pci_dev *pdev, int bar)
2356{
2357	struct pci_devres *dr;
2358
2359	if (pci_resource_len(pdev, bar) == 0)
2360		return;
2361	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2362		release_region(pci_resource_start(pdev, bar),
2363				pci_resource_len(pdev, bar));
2364	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2365		release_mem_region(pci_resource_start(pdev, bar),
2366				pci_resource_len(pdev, bar));
2367
2368	dr = find_pci_dr(pdev);
2369	if (dr)
2370		dr->region_mask &= ~(1 << bar);
2371}
2372
2373/**
2374 *	__pci_request_region - Reserve PCI I/O and memory resource
2375 *	@pdev: PCI device whose resources are to be reserved
2376 *	@bar: BAR to be reserved
2377 *	@res_name: Name to be associated with resource.
2378 *	@exclusive: whether the region access is exclusive or not
2379 *
2380 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2381 *	being reserved by owner @res_name.  Do not access any
2382 *	address inside the PCI regions unless this call returns
2383 *	successfully.
2384 *
2385 *	If @exclusive is set, then the region is marked so that userspace
2386 *	is explicitly not allowed to map the resource via /dev/mem or
2387 * 	sysfs MMIO access.
2388 *
2389 *	Returns 0 on success, or %EBUSY on error.  A warning
2390 *	message is also printed on failure.
2391 */
2392static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2393									int exclusive)
2394{
2395	struct pci_devres *dr;
2396
2397	if (pci_resource_len(pdev, bar) == 0)
2398		return 0;
2399		
2400	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2401		if (!request_region(pci_resource_start(pdev, bar),
2402			    pci_resource_len(pdev, bar), res_name))
2403			goto err_out;
2404	}
2405	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2406		if (!__request_mem_region(pci_resource_start(pdev, bar),
2407					pci_resource_len(pdev, bar), res_name,
2408					exclusive))
2409			goto err_out;
2410	}
2411
2412	dr = find_pci_dr(pdev);
2413	if (dr)
2414		dr->region_mask |= 1 << bar;
2415
2416	return 0;
2417
2418err_out:
2419	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2420		 &pdev->resource[bar]);
2421	return -EBUSY;
2422}
2423
2424/**
2425 *	pci_request_region - Reserve PCI I/O and memory resource
2426 *	@pdev: PCI device whose resources are to be reserved
2427 *	@bar: BAR to be reserved
2428 *	@res_name: Name to be associated with resource
2429 *
2430 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2431 *	being reserved by owner @res_name.  Do not access any
2432 *	address inside the PCI regions unless this call returns
2433 *	successfully.
2434 *
2435 *	Returns 0 on success, or %EBUSY on error.  A warning
2436 *	message is also printed on failure.
2437 */
2438int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2439{
2440	return __pci_request_region(pdev, bar, res_name, 0);
2441}
2442
2443/**
2444 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2445 *	@pdev: PCI device whose resources are to be reserved
2446 *	@bar: BAR to be reserved
2447 *	@res_name: Name to be associated with resource.
2448 *
2449 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2450 *	being reserved by owner @res_name.  Do not access any
2451 *	address inside the PCI regions unless this call returns
2452 *	successfully.
2453 *
2454 *	Returns 0 on success, or %EBUSY on error.  A warning
2455 *	message is also printed on failure.
2456 *
2457 *	The key difference is that _exclusive makes it so that userspace is
2458 *	explicitly not allowed to map the resource via /dev/mem or
2459 * 	sysfs.
2460 */
2461int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2462{
2463	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2464}
2465/**
2466 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2467 * @pdev: PCI device whose resources were previously reserved
2468 * @bars: Bitmask of BARs to be released
2469 *
2470 * Release selected PCI I/O and memory resources previously reserved.
2471 * Call this function only after all use of the PCI regions has ceased.
2472 */
2473void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2474{
2475	int i;
2476
2477	for (i = 0; i < 6; i++)
2478		if (bars & (1 << i))
2479			pci_release_region(pdev, i);
2480}
2481
2482int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2483				 const char *res_name, int excl)
2484{
2485	int i;
2486
2487	for (i = 0; i < 6; i++)
2488		if (bars & (1 << i))
2489			if (__pci_request_region(pdev, i, res_name, excl))
2490				goto err_out;
2491	return 0;
2492
2493err_out:
2494	while (--i >= 0)
2495		if (bars & (1 << i))
2496			pci_release_region(pdev, i);
2497
2498	return -EBUSY;
2499}
2500
2501
2502/**
2503 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2504 * @pdev: PCI device whose resources are to be reserved
2505 * @bars: Bitmask of BARs to be requested
2506 * @res_name: Name to be associated with resource
2507 */
2508int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2509				 const char *res_name)
2510{
2511	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2512}
2513
2514int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2515				 int bars, const char *res_name)
2516{
2517	return __pci_request_selected_regions(pdev, bars, res_name,
2518			IORESOURCE_EXCLUSIVE);
2519}
2520
2521/**
2522 *	pci_release_regions - Release reserved PCI I/O and memory resources
2523 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2524 *
2525 *	Releases all PCI I/O and memory resources previously reserved by a
2526 *	successful call to pci_request_regions.  Call this function only
2527 *	after all use of the PCI regions has ceased.
2528 */
2529
2530void pci_release_regions(struct pci_dev *pdev)
2531{
2532	pci_release_selected_regions(pdev, (1 << 6) - 1);
2533}
2534
2535/**
2536 *	pci_request_regions - Reserve PCI I/O and memory resources
2537 *	@pdev: PCI device whose resources are to be reserved
2538 *	@res_name: Name to be associated with resource.
2539 *
2540 *	Mark all PCI regions associated with PCI device @pdev as
2541 *	being reserved by owner @res_name.  Do not access any
2542 *	address inside the PCI regions unless this call returns
2543 *	successfully.
2544 *
2545 *	Returns 0 on success, or %EBUSY on error.  A warning
2546 *	message is also printed on failure.
2547 */
2548int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2549{
2550	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2551}
2552
2553/**
2554 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2555 *	@pdev: PCI device whose resources are to be reserved
2556 *	@res_name: Name to be associated with resource.
2557 *
2558 *	Mark all PCI regions associated with PCI device @pdev as
2559 *	being reserved by owner @res_name.  Do not access any
2560 *	address inside the PCI regions unless this call returns
2561 *	successfully.
2562 *
2563 *	pci_request_regions_exclusive() will mark the region so that
2564 * 	/dev/mem and the sysfs MMIO access will not be allowed.
2565 *
2566 *	Returns 0 on success, or %EBUSY on error.  A warning
2567 *	message is also printed on failure.
2568 */
2569int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2570{
2571	return pci_request_selected_regions_exclusive(pdev,
2572					((1 << 6) - 1), res_name);
2573}
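/*
 * A minimal probe-time sketch for a hypothetical driver "foo": enable the
 * device, claim all of its regions under one name, and map BAR 0.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int err;
 *
 *		err = pci_enable_device(pdev);
 *		if (err)
 *			return err;
 *
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			goto out_disable;
 *
 *		regs = pci_iomap(pdev, 0, 0);
 *		if (!regs) {
 *			err = -ENOMEM;
 *			goto out_release;
 *		}
 *		return 0;
 *
 *	out_release:
 *		pci_release_regions(pdev);
 *	out_disable:
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 */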
2574
2575static void __pci_set_master(struct pci_dev *dev, bool enable)
2576{
2577	u16 old_cmd, cmd;
2578
2579	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2580	if (enable)
2581		cmd = old_cmd | PCI_COMMAND_MASTER;
2582	else
2583		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2584	if (cmd != old_cmd) {
2585		dev_dbg(&dev->dev, "%s bus mastering\n",
2586			enable ? "enabling" : "disabling");
2587		pci_write_config_word(dev, PCI_COMMAND, cmd);
2588	}
2589	dev->is_busmaster = enable;
2590}
2591
2592/**
2593 * pci_set_master - enables bus-mastering for device dev
2594 * @dev: the PCI device to enable
2595 *
2596 * Enables bus-mastering on the device and calls pcibios_set_master()
2597 * to do the needed arch specific settings.
2598 */
2599void pci_set_master(struct pci_dev *dev)
2600{
2601	__pci_set_master(dev, true);
2602	pcibios_set_master(dev);
2603}
2604
2605/**
2606 * pci_clear_master - disables bus-mastering for device dev
2607 * @dev: the PCI device to disable
2608 */
2609void pci_clear_master(struct pci_dev *dev)
2610{
2611	__pci_set_master(dev, false);
2612}
2613
2614/**
2615 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2616 * @dev: the PCI device for which MWI is to be enabled
2617 *
2618 * Helper function for pci_set_mwi.
2619 * Originally copied from drivers/net/acenic.c.
2620 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2621 *
2622 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2623 */
2624int pci_set_cacheline_size(struct pci_dev *dev)
2625{
2626	u8 cacheline_size;
2627
2628	if (!pci_cache_line_size)
2629		return -EINVAL;
2630
2631	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2632	   equal to or multiple of the right value. */
2633	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2634	if (cacheline_size >= pci_cache_line_size &&
2635	    (cacheline_size % pci_cache_line_size) == 0)
2636		return 0;
2637
2638	/* Write the correct value. */
2639	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2640	/* Read it back. */
2641	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2642	if (cacheline_size == pci_cache_line_size)
2643		return 0;
2644
2645	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2646		   "supported\n", pci_cache_line_size << 2);
2647
2648	return -EINVAL;
2649}
2650EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2651
2652#ifdef PCI_DISABLE_MWI
2653int pci_set_mwi(struct pci_dev *dev)
2654{
2655	return 0;
2656}
2657
2658int pci_try_set_mwi(struct pci_dev *dev)
2659{
2660	return 0;
2661}
2662
2663void pci_clear_mwi(struct pci_dev *dev)
2664{
2665}
2666
2667#else
2668
2669/**
2670 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2671 * @dev: the PCI device for which MWI is enabled
2672 *
2673 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2674 *
2675 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2676 */
2677int
2678pci_set_mwi(struct pci_dev *dev)
2679{
2680	int rc;
2681	u16 cmd;
2682
2683	rc = pci_set_cacheline_size(dev);
2684	if (rc)
2685		return rc;
2686
2687	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2688	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2689		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2690		cmd |= PCI_COMMAND_INVALIDATE;
2691		pci_write_config_word(dev, PCI_COMMAND, cmd);
2692	}
2693	
2694	return 0;
2695}
2696
2697/**
2698 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2699 * @dev: the PCI device for which MWI is enabled
2700 *
2701 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2702 * Callers are not required to check the return value.
2703 *
2704 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2705 */
2706int pci_try_set_mwi(struct pci_dev *dev)
2707{
2708	int rc = pci_set_mwi(dev);
2709	return rc;
2710}
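/*
 * Since failure to enable MWI is harmless, a typical caller simply
 * ignores the result, e.g. somewhere in a probe path (sketch):
 *
 *	pci_try_set_mwi(pdev);
 */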
2711
2712/**
2713 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2714 * @dev: the PCI device to disable
2715 *
2716 * Disables PCI Memory-Write-Invalidate transaction on the device
2717 */
2718void
2719pci_clear_mwi(struct pci_dev *dev)
2720{
2721	u16 cmd;
2722
2723	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2724	if (cmd & PCI_COMMAND_INVALIDATE) {
2725		cmd &= ~PCI_COMMAND_INVALIDATE;
2726		pci_write_config_word(dev, PCI_COMMAND, cmd);
2727	}
2728}
2729#endif /* ! PCI_DISABLE_MWI */
2730
2731/**
2732 * pci_intx - enables/disables PCI INTx for device dev
2733 * @pdev: the PCI device to operate on
2734 * @enable: boolean: whether to enable or disable PCI INTx
2735 *
2736 * Enables/disables PCI INTx for device dev
2737 */
2738void
2739pci_intx(struct pci_dev *pdev, int enable)
2740{
2741	u16 pci_command, new;
2742
2743	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2744
2745	if (enable) {
2746		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2747	} else {
2748		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2749	}
2750
2751	if (new != pci_command) {
2752		struct pci_devres *dr;
2753
2754		pci_write_config_word(pdev, PCI_COMMAND, new);
2755
2756		dr = find_pci_dr(pdev);
2757		if (dr && !dr->restore_intx) {
2758			dr->restore_intx = 1;
2759			dr->orig_intx = !enable;
2760		}
2761	}
2762}
2763
2764/**
2765 * pci_msi_off - disables any msi or msix capabilities
2766 * @dev: the PCI device to operate on
2767 *
2768 * If you want to use msi see pci_enable_msi and friends.
2769 * This is a lower level primitive that allows us to disable
2770 * msi operation at the device level.
2771 */
2772void pci_msi_off(struct pci_dev *dev)
2773{
2774	int pos;
2775	u16 control;
2776
2777	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2778	if (pos) {
2779		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2780		control &= ~PCI_MSI_FLAGS_ENABLE;
2781		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2782	}
2783	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2784	if (pos) {
2785		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2786		control &= ~PCI_MSIX_FLAGS_ENABLE;
2787		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2788	}
2789}
2790EXPORT_SYMBOL_GPL(pci_msi_off);
2791
2792int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2793{
2794	return dma_set_max_seg_size(&dev->dev, size);
2795}
2796EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2797
2798int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2799{
2800	return dma_set_seg_boundary(&dev->dev, mask);
2801}
2802EXPORT_SYMBOL(pci_set_dma_seg_boundary);
2803
2804static int pcie_flr(struct pci_dev *dev, int probe)
2805{
2806	int i;
2807	int pos;
2808	u32 cap;
2809	u16 status, control;
2810
2811	pos = pci_pcie_cap(dev);
2812	if (!pos)
2813		return -ENOTTY;
2814
2815	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2816	if (!(cap & PCI_EXP_DEVCAP_FLR))
2817		return -ENOTTY;
2818
2819	if (probe)
2820		return 0;
2821
2822	/* Wait for Transaction Pending bit to clear */
2823	for (i = 0; i < 4; i++) {
2824		if (i)
2825			msleep((1 << (i - 1)) * 100);
2826
2827		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2828		if (!(status & PCI_EXP_DEVSTA_TRPND))
2829			goto clear;
2830	}
2831
2832	dev_err(&dev->dev, "transaction is not cleared; "
2833			"proceeding with reset anyway\n");
2834
2835clear:
2836	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2837	control |= PCI_EXP_DEVCTL_BCR_FLR;
2838	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2839
2840	msleep(100);
2841
2842	return 0;
2843}
2844
2845static int pci_af_flr(struct pci_dev *dev, int probe)
2846{
2847	int i;
2848	int pos;
2849	u8 cap;
2850	u8 status;
2851
2852	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2853	if (!pos)
2854		return -ENOTTY;
2855
2856	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2857	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2858		return -ENOTTY;
2859
2860	if (probe)
2861		return 0;
2862
2863	/* Wait for Transaction Pending bit to clear */
2864	for (i = 0; i < 4; i++) {
2865		if (i)
2866			msleep((1 << (i - 1)) * 100);
2867
2868		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2869		if (!(status & PCI_AF_STATUS_TP))
2870			goto clear;
2871	}
2872
2873	dev_err(&dev->dev, "transaction is not cleared; "
2874			"proceeding with reset anyway\n");
2875
2876clear:
2877	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2878	msleep(100);
2879
2880	return 0;
2881}
2882
2883/**
2884 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
2885 * @dev: Device to reset.
2886 * @probe: If set, only check if the device can be reset this way.
2887 *
2888 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
2889 * unset, it will be reinitialized internally when going from PCI_D3hot to
2890 * PCI_D0.  If that's the case and the device is not in a low-power state
2891 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
2892 *
2893 * NOTE: This causes the caller to sleep for twice the device power transition
2894 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
2895 * by default (i.e. unless the @dev's d3_delay field has a different value).
2896 * Moreover, only devices in D0 can be reset by this function.
2897 */
2898static int pci_pm_reset(struct pci_dev *dev, int probe)
2899{
2900	u16 csr;
2901
2902	if (!dev->pm_cap)
2903		return -ENOTTY;
2904
2905	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2906	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2907		return -ENOTTY;
2908
2909	if (probe)
2910		return 0;
2911
2912	if (dev->current_state != PCI_D0)
2913		return -EINVAL;
2914
2915	csr &= ~PCI_PM_CTRL_STATE_MASK;
2916	csr |= PCI_D3hot;
2917	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2918	pci_dev_d3_sleep(dev);
2919
2920	csr &= ~PCI_PM_CTRL_STATE_MASK;
2921	csr |= PCI_D0;
2922	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2923	pci_dev_d3_sleep(dev);
2924
2925	return 0;
2926}
2927
2928static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2929{
2930	u16 ctrl;
2931	struct pci_dev *pdev;
2932
2933	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2934		return -ENOTTY;
2935
2936	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2937		if (pdev != dev)
2938			return -ENOTTY;
2939
2940	if (probe)
2941		return 0;
2942
2943	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2944	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2945	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2946	msleep(100);
2947
2948	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2949	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2950	msleep(100);
2951
2952	return 0;
2953}
2954
2955static int pci_dev_reset(struct pci_dev *dev, int probe)
2956{
2957	int rc;
2958
2959	might_sleep();
2960
2961	if (!probe) {
2962		pci_block_user_cfg_access(dev);
2963		/* block PM suspend, driver probe, etc. */
2964		device_lock(&dev->dev);
2965	}
2966
2967	rc = pci_dev_specific_reset(dev, probe);
2968	if (rc != -ENOTTY)
2969		goto done;
2970
2971	rc = pcie_flr(dev, probe);
2972	if (rc != -ENOTTY)
2973		goto done;
2974
2975	rc = pci_af_flr(dev, probe);
2976	if (rc != -ENOTTY)
2977		goto done;
2978
2979	rc = pci_pm_reset(dev, probe);
2980	if (rc != -ENOTTY)
2981		goto done;
2982
2983	rc = pci_parent_bus_reset(dev, probe);
2984done:
2985	if (!probe) {
2986		device_unlock(&dev->dev);
2987		pci_unblock_user_cfg_access(dev);
2988	}
2989
2990	return rc;
2991}
2992
2993/**
2994 * __pci_reset_function - reset a PCI device function
2995 * @dev: PCI device to reset
2996 *
2997 * Some devices allow an individual function to be reset without affecting
2998 * other functions in the same device.  The PCI device must be responsive
2999 * to PCI config space in order to use this function.
3000 *
3001 * The device function is presumed to be unused when this function is called.
3002 * Resetting the device will make the contents of PCI configuration space
3003 * random, so any caller of this must be prepared to reinitialise the
3004 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3005 * etc.
3006 *
3007 * Returns 0 if the device function was successfully reset or negative if the
3008 * device doesn't support resetting a single function.
3009 */
3010int __pci_reset_function(struct pci_dev *dev)
3011{
3012	return pci_dev_reset(dev, 0);
3013}
3014EXPORT_SYMBOL_GPL(__pci_reset_function);
3015
3016/**
3017 * pci_probe_reset_function - check whether the device can be safely reset
3018 * @dev: PCI device to reset
3019 *
3020 * Some devices allow an individual function to be reset without affecting
3021 * other functions in the same device.  The PCI device must be responsive
3022 * to PCI config space in order to use this function.
3023 *
3024 * Returns 0 if the device function can be reset or negative if the
3025 * device doesn't support resetting a single function.
3026 */
3027int pci_probe_reset_function(struct pci_dev *dev)
3028{
3029	return pci_dev_reset(dev, 1);
3030}
3031
3032/**
3033 * pci_reset_function - quiesce and reset a PCI device function
3034 * @dev: PCI device to reset
3035 *
3036 * Some devices allow an individual function to be reset without affecting
3037 * other functions in the same device.  The PCI device must be responsive
3038 * to PCI config space in order to use this function.
3039 *
3040 * This function does not just reset the PCI portion of a device, but
3041 * clears all the state associated with the device.  This function differs
3042 * from __pci_reset_function in that it saves and restores device state
3043 * over the reset.
3044 *
3045 * Returns 0 if the device function was successfully reset or negative if the
3046 * device doesn't support resetting a single function.
3047 */
3048int pci_reset_function(struct pci_dev *dev)
3049{
3050	int rc;
3051
3052	rc = pci_dev_reset(dev, 1);
3053	if (rc)
3054		return rc;
3055
3056	pci_save_state(dev);
3057
3058	/*
3059	 * both INTx and MSI are disabled after the Interrupt Disable bit
3060	 * is set and the Bus Master bit is cleared.
3061	 */
3062	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3063
3064	rc = pci_dev_reset(dev, 0);
3065
3066	pci_restore_state(dev);
3067
3068	return rc;
3069}
3070EXPORT_SYMBOL_GPL(pci_reset_function);
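/*
 * A minimal sketch (hypothetical caller): reset an already quiesced
 * function and treat failure as "cannot be reset in isolation".
 *
 *	if (pci_reset_function(pdev))
 *		dev_warn(&pdev->dev, "function reset not supported\n");
 */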
3071
3072/**
3073 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3074 * @dev: PCI device to query
3075 *
3076 * Returns mmrbc: maximum designed memory read count in bytes
3077 *    or appropriate error value.
3078 */
3079int pcix_get_max_mmrbc(struct pci_dev *dev)
3080{
3081	int cap;
3082	u32 stat;
3083
3084	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3085	if (!cap)
3086		return -EINVAL;
3087
3088	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3089		return -EINVAL;
3090
3091	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3092}
3093EXPORT_SYMBOL(pcix_get_max_mmrbc);
3094
3095/**
3096 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3097 * @dev: PCI device to query
3098 *
3099 * Returns mmrbc: maximum memory read count in bytes
3100 *    or appropriate error value.
3101 */
3102int pcix_get_mmrbc(struct pci_dev *dev)
3103{
3104	int cap;
3105	u16 cmd;
3106
3107	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3108	if (!cap)
3109		return -EINVAL;
3110
3111	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3112		return -EINVAL;
3113
3114	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3115}
3116EXPORT_SYMBOL(pcix_get_mmrbc);
3117
3118/**
3119 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3120 * @dev: PCI device to query
3121 * @mmrbc: maximum memory read count in bytes
3122 *    valid values are 512, 1024, 2048, 4096
3123 *
3124 * If possible sets maximum memory read byte count; some bridges have errata
3125 * that prevent this.
3126 */
3127int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3128{
3129	int cap;
3130	u32 stat, v, o;
3131	u16 cmd;
3132
3133	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3134		return -EINVAL;
3135
3136	v = ffs(mmrbc) - 10;
3137
3138	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3139	if (!cap)
3140		return -EINVAL;
3141
3142	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3143		return -EINVAL;
3144
3145	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3146		return -E2BIG;
3147
3148	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3149		return -EINVAL;
3150
3151	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3152	if (o != v) {
3153		if (v > o && dev->bus &&
3154		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3155			return -EIO;
3156
3157		cmd &= ~PCI_X_CMD_MAX_READ;
3158		cmd |= v << 2;
3159		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3160			return -EIO;
3161	}
3162	return 0;
3163}
3164EXPORT_SYMBOL(pcix_set_mmrbc);
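/*
 * Worked example of the encoding above: pcix_set_mmrbc(dev, 2048) computes
 * v = ffs(2048) - 10 = 12 - 10 = 2 and writes it to bits 3:2 of PCI_X_CMD;
 * reading it back decodes as 512 << 2 = 2048 bytes.
 */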
3165
3166/**
3167 * pcie_get_readrq - get PCI Express read request size
3168 * @dev: PCI device to query
3169 *
3170 * Returns maximum memory read request in bytes
3171 *    or appropriate error value.
3172 */
3173int pcie_get_readrq(struct pci_dev *dev)
3174{
3175	int ret, cap;
3176	u16 ctl;
3177
3178	cap = pci_pcie_cap(dev);
3179	if (!cap)
3180		return -EINVAL;
3181
3182	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3183	if (!ret)
3184		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3185
3186	return ret;
3187}
3188EXPORT_SYMBOL(pcie_get_readrq);
3189
3190/**
3191 * pcie_set_readrq - set PCI Express maximum memory read request
3192 * @dev: PCI device to query
3193 * @rq: maximum memory read count in bytes
3194 *    valid values are 128, 256, 512, 1024, 2048, 4096
3195 *
3196 * If possible sets maximum memory read request in bytes
3197 */
3198int pcie_set_readrq(struct pci_dev *dev, int rq)
3199{
3200	int cap, err = -EINVAL;
3201	u16 ctl, v;
3202
3203	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3204		goto out;
3205
3206	v = (ffs(rq) - 8) << 12;
3207
3208	cap = pci_pcie_cap(dev);
3209	if (!cap)
3210		goto out;
3211
3212	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3213	if (err)
3214		goto out;
3215
3216	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3217		ctl &= ~PCI_EXP_DEVCTL_READRQ;
3218		ctl |= v;
3219		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3220	}
3221
3222out:
3223	return err;
3224}
3225EXPORT_SYMBOL(pcie_set_readrq);
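/*
 * A minimal sketch with a hypothetical value: clamp the device's read
 * requests to 512 bytes and log what actually took effect.
 *
 *	if (!pcie_set_readrq(pdev, 512))
 *		dev_dbg(&pdev->dev, "MRRS is now %d\n", pcie_get_readrq(pdev));
 */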
3226
3227/**
3228 * pcie_get_mps - get PCI Express maximum payload size
3229 * @dev: PCI device to query
3230 *
3231 * Returns maximum payload size in bytes
3232 *    or appropriate error value.
3233 */
3234int pcie_get_mps(struct pci_dev *dev)
3235{
3236	int ret, cap;
3237	u16 ctl;
3238
3239	cap = pci_pcie_cap(dev);
3240	if (!cap)
3241		return -EINVAL;
3242
3243	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3244	if (!ret)
3245		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3246
3247	return ret;
3248}
3249
3250/**
3251 * pcie_set_mps - set PCI Express maximum payload size
3252 * @dev: PCI device to query
3253 * @mps: maximum payload size in bytes
3254 *    valid values are 128, 256, 512, 1024, 2048, 4096
3255 *
3256 * If possible sets maximum payload size
3257 */
3258int pcie_set_mps(struct pci_dev *dev, int mps)
3259{
3260	int cap, err = -EINVAL;
3261	u16 ctl, v;
3262
3263	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3264		goto out;
3265
3266	v = ffs(mps) - 8;
3267	if (v > dev->pcie_mpss) 
3268		goto out;
3269	v <<= 5;
3270
3271	cap = pci_pcie_cap(dev);
3272	if (!cap)
3273		goto out;
3274
3275	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3276	if (err)
3277		goto out;
3278
3279	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3280		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3281		ctl |= v;
3282		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3283	}
3284out:
3285	return err;
3286}
3287
3288/**
3289 * pci_select_bars - Make BAR mask from the type of resource
3290 * @dev: the PCI device for which BAR mask is made
3291 * @flags: resource type mask to be selected
3292 *
3293 * This helper routine makes a BAR mask from the type of resource.
3294 */
3295int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3296{
3297	int i, bars = 0;
3298	for (i = 0; i < PCI_NUM_RESOURCES; i++)
3299		if (pci_resource_flags(dev, i) & flags)
3300			bars |= (1 << i);
3301	return bars;
3302}
3303
3304/**
3305 * pci_resource_bar - get position of the BAR associated with a resource
3306 * @dev: the PCI device
3307 * @resno: the resource number
3308 * @type: the BAR type to be filled in
3309 *
3310 * Returns BAR position in config space, or 0 if the BAR is invalid.
3311 */
3312int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3313{
3314	int reg;
3315
3316	if (resno < PCI_ROM_RESOURCE) {
3317		*type = pci_bar_unknown;
3318		return PCI_BASE_ADDRESS_0 + 4 * resno;
3319	} else if (resno == PCI_ROM_RESOURCE) {
3320		*type = pci_bar_mem32;
3321		return dev->rom_base_reg;
3322	} else if (resno < PCI_BRIDGE_RESOURCES) {
3323		/* device specific resource */
3324		reg = pci_iov_resource_bar(dev, resno, type);
3325		if (reg)
3326			return reg;
3327	}
3328
3329	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3330	return 0;
3331}
3332
3333/* Some architectures require additional programming to enable VGA */
3334static arch_set_vga_state_t arch_set_vga_state;
3335
3336void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3337{
3338	arch_set_vga_state = func;	/* NULL disables */
3339}
3340
3341static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3342		      unsigned int command_bits, u32 flags)
3343{
3344	if (arch_set_vga_state)
3345		return arch_set_vga_state(dev, decode, command_bits,
3346						flags);
3347	return 0;
3348}
3349
3350/**
3351 * pci_set_vga_state - set VGA decode state on device and parents if requested
3352 * @dev: the PCI device
3353 * @decode: true = enable decoding, false = disable decoding
3354 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3355 * @flags: traverse ancestors and change bridges
3356 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3357 */
3358int pci_set_vga_state(struct pci_dev *dev, bool decode,
3359		      unsigned int command_bits, u32 flags)
3360{
3361	struct pci_bus *bus;
3362	struct pci_dev *bridge;
3363	u16 cmd;
3364	int rc;
3365
3366	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3367
3368	/* ARCH specific VGA enables */
3369	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3370	if (rc)
3371		return rc;
3372
3373	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3374		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3375		if (decode == true)
3376			cmd |= command_bits;
3377		else
3378			cmd &= ~command_bits;
3379		pci_write_config_word(dev, PCI_COMMAND, cmd);
3380	}
3381
3382	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3383		return 0;
3384
3385	bus = dev->bus;
3386	while (bus) {
3387		bridge = bus->self;
3388		if (bridge) {
3389			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3390					     &cmd);
3391			if (decode == true)
3392				cmd |= PCI_BRIDGE_CTL_VGA;
3393			else
3394				cmd &= ~PCI_BRIDGE_CTL_VGA;
3395			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3396					      cmd);
3397		}
3398		bus = bus->parent;
3399	}
3400	return 0;
3401}
3402
3403#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3404static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3405static DEFINE_SPINLOCK(resource_alignment_lock);
3406
3407/**
3408 * pci_specified_resource_alignment - get resource alignment specified by user.
3409 * @dev: the PCI device to check
3410 *
3411 * RETURNS: Resource alignment if it is specified.
3412 *          Zero if it is not specified.
3413 */
3414resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3415{
3416	int seg, bus, slot, func, align_order, count;
3417	resource_size_t align = 0;
3418	char *p;
3419
3420	spin_lock(&resource_alignment_lock);
3421	p = resource_alignment_param;
3422	while (*p) {
3423		count = 0;
3424		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3425							p[count] == '@') {
3426			p += count + 1;
3427		} else {
3428			align_order = -1;
3429		}
3430		if (sscanf(p, "%x:%x:%x.%x%n",
3431			&seg, &bus, &slot, &func, &count) != 4) {
3432			seg = 0;
3433			if (sscanf(p, "%x:%x.%x%n",
3434					&bus, &slot, &func, &count) != 3) {
3435				/* Invalid format */
3436				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3437					p);
3438				break;
3439			}
3440		}
3441		p += count;
3442		if (seg == pci_domain_nr(dev->bus) &&
3443			bus == dev->bus->number &&
3444			slot == PCI_SLOT(dev->devfn) &&
3445			func == PCI_FUNC(dev->devfn)) {
3446			if (align_order == -1) {
3447				align = PAGE_SIZE;
3448			} else {
3449				align = 1 << align_order;
3450			}
3451			/* Found */
3452			break;
3453		}
3454		if (*p != ';' && *p != ',') {
3455			/* End of param or invalid format */
3456			break;
3457		}
3458		p++;
3459	}
3460	spin_unlock(&resource_alignment_lock);
3461	return align;
3462}
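/*
 * Example of the format parsed above (hypothetical device address):
 * booting with "pci=resource_alignment=20@0000:00:02.0" requests a
 * 1 << 20 (1MB) alignment for domain 0000, bus 00, slot 02, function 0;
 * leaving out the "20@" order prefix falls back to PAGE_SIZE alignment.
 */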
3463
3464/**
3465 * pci_is_reassigndev - check if specified PCI is target device to reassign
3466 * @dev: the PCI device to check
3467 *
3468 * RETURNS: non-zero if the PCI device is a target device to reassign,
3469 *          zero otherwise.
3470 */
3471int pci_is_reassigndev(struct pci_dev *dev)
3472{
3473	return (pci_specified_resource_alignment(dev) != 0);
3474}
3475
3476ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3477{
3478	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3479		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3480	spin_lock(&resource_alignment_lock);
3481	strncpy(resource_alignment_param, buf, count);
3482	resource_alignment_param[count] = '\0';
3483	spin_unlock(&resource_alignment_lock);
3484	return count;
3485}
3486
3487ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3488{
3489	size_t count;
3490	spin_lock(&resource_alignment_lock);
3491	count = snprintf(buf, size, "%s", resource_alignment_param);
3492	spin_unlock(&resource_alignment_lock);
3493	return count;
3494}
3495
3496static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3497{
3498	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3499}
3500
3501static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3502					const char *buf, size_t count)
3503{
3504	return pci_set_resource_alignment_param(buf, count);
3505}
3506
3507BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3508					pci_resource_alignment_store);
3509
3510static int __init pci_resource_alignment_sysfs_init(void)
3511{
3512	return bus_create_file(&pci_bus_type,
3513					&bus_attr_resource_alignment);
3514}
3515
3516late_initcall(pci_resource_alignment_sysfs_init);
3517
3518static void __devinit pci_no_domains(void)
3519{
3520#ifdef CONFIG_PCI_DOMAINS
3521	pci_domains_supported = 0;
3522#endif
3523}
3524
3525/**
3526 * pci_ext_cfg_avail - can we access extended PCI config space?
3527 * @dev: The PCI device of the root bridge.
3528 *
3529 * Returns 1 if we can access PCI extended config space (offsets
3530 * greater than 0xff). This is the default implementation. Architecture
3531 * implementations can override this.
3532 */
3533int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3534{
3535	return 1;
3536}
3537
3538void __weak pci_fixup_cardbus(struct pci_bus *bus)
3539{
3540}
3541EXPORT_SYMBOL(pci_fixup_cardbus);
3542
3543static int __init pci_setup(char *str)
3544{
3545	while (str) {
3546		char *k = strchr(str, ',');
3547		if (k)
3548			*k++ = 0;
3549		if (*str && (str = pcibios_setup(str)) && *str) {
3550			if (!strcmp(str, "nomsi")) {
3551				pci_no_msi();
3552			} else if (!strcmp(str, "noaer")) {
3553				pci_no_aer();
3554			} else if (!strncmp(str, "realloc", 7)) {
3555				pci_realloc();
3556			} else if (!strcmp(str, "nodomains")) {
3557				pci_no_domains();
3558			} else if (!strncmp(str, "cbiosize=", 9)) {
3559				pci_cardbus_io_size = memparse(str + 9, &str);
3560			} else if (!strncmp(str, "cbmemsize=", 10)) {
3561				pci_cardbus_mem_size = memparse(str + 10, &str);
3562			} else if (!strncmp(str, "resource_alignment=", 19)) {
3563				pci_set_resource_alignment_param(str + 19,
3564							strlen(str + 19));
3565			} else if (!strncmp(str, "ecrc=", 5)) {
3566				pcie_ecrc_get_policy(str + 5);
3567			} else if (!strncmp(str, "hpiosize=", 9)) {
3568				pci_hotplug_io_size = memparse(str + 9, &str);
3569			} else if (!strncmp(str, "hpmemsize=", 10)) {
3570				pci_hotplug_mem_size = memparse(str + 10, &str);
3571			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3572				pcie_bus_config = PCIE_BUS_TUNE_OFF;
3573			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
3574				pcie_bus_config = PCIE_BUS_SAFE;
3575			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
3576				pcie_bus_config = PCIE_BUS_PERFORMANCE;
3577			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3578				pcie_bus_config = PCIE_BUS_PEER2PEER;
3579			} else {
3580				printk(KERN_ERR "PCI: Unknown option `%s'\n",
3581						str);
3582			}
3583		}
3584		str = k;
3585	}
3586	return 0;
3587}
3588early_param("pci", pci_setup);
3589
3590EXPORT_SYMBOL(pci_reenable_device);
3591EXPORT_SYMBOL(pci_enable_device_io);
3592EXPORT_SYMBOL(pci_enable_device_mem);
3593EXPORT_SYMBOL(pci_enable_device);
3594EXPORT_SYMBOL(pcim_enable_device);
3595EXPORT_SYMBOL(pcim_pin_device);
3596EXPORT_SYMBOL(pci_disable_device);
3597EXPORT_SYMBOL(pci_find_capability);
3598EXPORT_SYMBOL(pci_bus_find_capability);
3599EXPORT_SYMBOL(pci_release_regions);
3600EXPORT_SYMBOL(pci_request_regions);
3601EXPORT_SYMBOL(pci_request_regions_exclusive);
3602EXPORT_SYMBOL(pci_release_region);
3603EXPORT_SYMBOL(pci_request_region);
3604EXPORT_SYMBOL(pci_request_region_exclusive);
3605EXPORT_SYMBOL(pci_release_selected_regions);
3606EXPORT_SYMBOL(pci_request_selected_regions);
3607EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3608EXPORT_SYMBOL(pci_set_master);
3609EXPORT_SYMBOL(pci_clear_master);
3610EXPORT_SYMBOL(pci_set_mwi);
3611EXPORT_SYMBOL(pci_try_set_mwi);
3612EXPORT_SYMBOL(pci_clear_mwi);
3613EXPORT_SYMBOL_GPL(pci_intx);
3614EXPORT_SYMBOL(pci_assign_resource);
3615EXPORT_SYMBOL(pci_find_parent_resource);
3616EXPORT_SYMBOL(pci_select_bars);
3617
3618EXPORT_SYMBOL(pci_set_power_state);
3619EXPORT_SYMBOL(pci_save_state);
3620EXPORT_SYMBOL(pci_restore_state);
3621EXPORT_SYMBOL(pci_pme_capable);
3622EXPORT_SYMBOL(pci_pme_active);
3623EXPORT_SYMBOL(pci_wake_from_d3);
3624EXPORT_SYMBOL(pci_target_state);
3625EXPORT_SYMBOL(pci_prepare_to_sleep);
3626EXPORT_SYMBOL(pci_back_from_sleep);
3627EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
v4.17
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * PCI Bus Services, see include/linux/pci.h for further explanation.
   4 *
   5 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   6 * David Mosberger-Tang
   7 *
   8 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   9 */
  10
  11#include <linux/acpi.h>
  12#include <linux/kernel.h>
  13#include <linux/delay.h>
  14#include <linux/dmi.h>
  15#include <linux/init.h>
  16#include <linux/of.h>
  17#include <linux/of_pci.h>
  18#include <linux/pci.h>
  19#include <linux/pm.h>
  20#include <linux/slab.h>
  21#include <linux/module.h>
  22#include <linux/spinlock.h>
  23#include <linux/string.h>
  24#include <linux/log2.h>
  25#include <linux/logic_pio.h>
  26#include <linux/pci-aspm.h>
  27#include <linux/pm_wakeup.h>
  28#include <linux/interrupt.h>
  29#include <linux/device.h>
  30#include <linux/pm_runtime.h>
  31#include <linux/pci_hotplug.h>
  32#include <linux/vmalloc.h>
  33#include <linux/pci-ats.h>
  34#include <asm/setup.h>
  35#include <asm/dma.h>
  36#include <linux/aer.h>
  37#include "pci.h"
  38
  39const char *pci_power_names[] = {
  40	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  41};
  42EXPORT_SYMBOL_GPL(pci_power_names);
  43
  44int isa_dma_bridge_buggy;
  45EXPORT_SYMBOL(isa_dma_bridge_buggy);
  46
  47int pci_pci_problems;
  48EXPORT_SYMBOL(pci_pci_problems);
  49
  50unsigned int pci_pm_d3_delay;
  51
  52static void pci_pme_list_scan(struct work_struct *work);
  53
  54static LIST_HEAD(pci_pme_list);
  55static DEFINE_MUTEX(pci_pme_list_mutex);
  56static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  57
  58struct pci_pme_device {
  59	struct list_head list;
  60	struct pci_dev *dev;
  61};
  62
  63#define PME_TIMEOUT 1000 /* How long between PME checks */
  64
  65static void pci_dev_d3_sleep(struct pci_dev *dev)
  66{
  67	unsigned int delay = dev->d3_delay;
  68
  69	if (delay < pci_pm_d3_delay)
  70		delay = pci_pm_d3_delay;
  71
  72	if (delay)
  73		msleep(delay);
  74}
  75
  76#ifdef CONFIG_PCI_DOMAINS
  77int pci_domains_supported = 1;
  78#endif
  79
  80#define DEFAULT_CARDBUS_IO_SIZE		(256)
  81#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  82/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  83unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  84unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  85
  86#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  87#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
  88/* pci=hpmemsize=nnM,hpiosize=nn can override this */
  89unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  90unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
  91
  92#define DEFAULT_HOTPLUG_BUS_SIZE	1
  93unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
  94
  95enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
  96
  97/*
  98 * The default CLS is used if arch didn't set CLS explicitly and not
  99 * all pci devices agree on the same value.  Arch can override either
 100 * the dfl or actual value as it sees fit.  Don't forget this is
 101 * measured in 32-bit words, not bytes.
 102 */
 103u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
 104u8 pci_cache_line_size;
 105
 106/*
 107 * If we set up a device for bus mastering, we need to check the latency
 108 * timer as certain BIOSes forget to set it properly.
 109 */
 110unsigned int pcibios_max_latency = 255;
 111
 112/* If set, the PCIe ARI capability will not be used. */
 113static bool pcie_ari_disabled;
 114
 115/* Disable bridge_d3 for all PCIe ports */
 116static bool pci_bridge_d3_disable;
 117/* Force bridge_d3 for all PCIe ports */
 118static bool pci_bridge_d3_force;
 119
 120static int __init pcie_port_pm_setup(char *str)
 121{
 122	if (!strcmp(str, "off"))
 123		pci_bridge_d3_disable = true;
 124	else if (!strcmp(str, "force"))
 125		pci_bridge_d3_force = true;
 126	return 1;
 127}
 128__setup("pcie_port_pm=", pcie_port_pm_setup);
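/*
 * Editor's note (not in the original source): "pcie_port_pm=" is a
 * boot-time parameter; "pcie_port_pm=off" sets pci_bridge_d3_disable and
 * "pcie_port_pm=force" sets pci_bridge_d3_force, per the parser above.
 */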
 129
 130/* Time to wait after a reset for device to become responsive */
 131#define PCIE_RESET_READY_POLL_MS 60000
 132
 133/**
 134 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 135 * @bus: pointer to PCI bus structure to search
 136 *
 137 * Given a PCI bus, returns the highest PCI bus number present in the set
 138 * including the given PCI bus and its list of child PCI buses.
 139 */
 140unsigned char pci_bus_max_busnr(struct pci_bus *bus)
 141{
 142	struct pci_bus *tmp;
 143	unsigned char max, n;
 144
 145	max = bus->busn_res.end;
 146	list_for_each_entry(tmp, &bus->children, node) {
 147		n = pci_bus_max_busnr(tmp);
 148		if (n > max)
 149			max = n;
 150	}
 151	return max;
 152}
 153EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 154
 155#ifdef CONFIG_HAS_IOMEM
 156void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 157{
 158	struct resource *res = &pdev->resource[bar];
 159
 160	/*
 161	 * Make sure the BAR is actually a memory resource, not an IO resource
 162	 */
 163	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
 164		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
 165		return NULL;
 166	}
 167	return ioremap_nocache(res->start, resource_size(res));
 168}
 169EXPORT_SYMBOL_GPL(pci_ioremap_bar);
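/*
 * Editor's example (not in the original source): a sketch of how a driver
 * probe() might map BAR 0 with pci_ioremap_bar(); "regs" is a
 * hypothetical name.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *
 *	if (!regs)
 *		return -ENOMEM;
 */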
 170
 171void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
 172{
 173	/*
 174	 * Make sure the BAR is actually a memory resource, not an IO resource
 175	 */
 176	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 177		WARN_ON(1);
 178		return NULL;
 179	}
 180	return ioremap_wc(pci_resource_start(pdev, bar),
 181			  pci_resource_len(pdev, bar));
 182}
 183EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
 184#endif
 185
 186
 187static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 188				   u8 pos, int cap, int *ttl)
 189{
 190	u8 id;
 191	u16 ent;
 192
 193	pci_bus_read_config_byte(bus, devfn, pos, &pos);
 194
 195	while ((*ttl)--) {
 196		if (pos < 0x40)
 197			break;
 198		pos &= ~3;
 199		pci_bus_read_config_word(bus, devfn, pos, &ent);
 200
 201		id = ent & 0xff;
 202		if (id == 0xff)
 203			break;
 204		if (id == cap)
 205			return pos;
 206		pos = (ent >> 8);
 207	}
 208	return 0;
 209}
 210
 211static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 212			       u8 pos, int cap)
 213{
 214	int ttl = PCI_FIND_CAP_TTL;
 215
 216	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 217}
 218
 219int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 220{
 221	return __pci_find_next_cap(dev->bus, dev->devfn,
 222				   pos + PCI_CAP_LIST_NEXT, cap);
 223}
 224EXPORT_SYMBOL_GPL(pci_find_next_capability);
 225
 226static int __pci_bus_find_cap_start(struct pci_bus *bus,
 227				    unsigned int devfn, u8 hdr_type)
 228{
 229	u16 status;
 230
 231	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 232	if (!(status & PCI_STATUS_CAP_LIST))
 233		return 0;
 234
 235	switch (hdr_type) {
 236	case PCI_HEADER_TYPE_NORMAL:
 237	case PCI_HEADER_TYPE_BRIDGE:
 238		return PCI_CAPABILITY_LIST;
 239	case PCI_HEADER_TYPE_CARDBUS:
 240		return PCI_CB_CAPABILITY_LIST;
 241	}
 242
 243	return 0;
 244}
 245
 246/**
 247 * pci_find_capability - query for devices' capabilities
 248 * @dev: PCI device to query
 249 * @cap: capability code
 250 *
 251 * Tell if a device supports a given PCI capability.
 252 * Returns the address of the requested capability structure within the
 253 * device's PCI configuration space or 0 in case the device does not
 254 * support it.  Possible values for @cap:
 255 *
 256 *  %PCI_CAP_ID_PM           Power Management
 257 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 258 *  %PCI_CAP_ID_VPD          Vital Product Data
 259 *  %PCI_CAP_ID_SLOTID       Slot Identification
 260 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 261 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 262 *  %PCI_CAP_ID_PCIX         PCI-X
 263 *  %PCI_CAP_ID_EXP          PCI Express
 264 */
 265int pci_find_capability(struct pci_dev *dev, int cap)
 266{
 267	int pos;
 268
 269	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 270	if (pos)
 271		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 272
 273	return pos;
 274}
 275EXPORT_SYMBOL(pci_find_capability);
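/*
 * Editor's example (not in the original source): a sketch of locating the
 * Power Management capability and reading its PMC register.
 *
 *	int pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pos)
 *		pci_read_config_word(pdev, pos + PCI_PM_PMC, &pmc);
 */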
 276
 277/**
 278 * pci_bus_find_capability - query for devices' capabilities
 279 * @bus:   the PCI bus to query
 280 * @devfn: PCI device to query
 281 * @cap:   capability code
 282 *
 283 * Like pci_find_capability() but works for pci devices that do not have a
 284 * pci_dev structure set up yet.
 285 *
 286 * Returns the address of the requested capability structure within the
 287 * device's PCI configuration space or 0 in case the device does not
 288 * support it.
 289 */
 290int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 291{
 292	int pos;
 293	u8 hdr_type;
 294
 295	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 296
 297	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 298	if (pos)
 299		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 300
 301	return pos;
 302}
 303EXPORT_SYMBOL(pci_bus_find_capability);
 304
 305/**
 306 * pci_find_next_ext_capability - Find an extended capability
 307 * @dev: PCI device to query
 308 * @start: address at which to start looking (0 to start at beginning of list)
 309 * @cap: capability code
 310 *
 311 * Returns the address of the next matching extended capability structure
 312 * within the device's PCI configuration space or 0 if the device does
 313 * not support it.  Some capabilities can occur several times, e.g., the
 314 * vendor-specific capability, and this provides a way to find them all.
 315 */
 316int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
 317{
 318	u32 header;
 319	int ttl;
 320	int pos = PCI_CFG_SPACE_SIZE;
 321
 322	/* minimum 8 bytes per capability */
 323	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 324
 325	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 326		return 0;
 327
 328	if (start)
 329		pos = start;
 330
 331	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 332		return 0;
 333
 334	/*
 335	 * If we have no capabilities, this is indicated by cap ID,
 336	 * cap version and next pointer all being 0.
 337	 */
 338	if (header == 0)
 339		return 0;
 340
 341	while (ttl-- > 0) {
 342		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
 343			return pos;
 344
 345		pos = PCI_EXT_CAP_NEXT(header);
 346		if (pos < PCI_CFG_SPACE_SIZE)
 347			break;
 348
 349		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 350			break;
 351	}
 352
 353	return 0;
 354}
 355EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
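/*
 * Editor's example (not in the original source): since a vendor-specific
 * extended capability may occur several times, all instances can be
 * walked as sketched here ("handle_vsec" is a hypothetical helper).
 *
 *	int pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(pdev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		handle_vsec(pdev, pos);
 */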
 356
 357/**
 358 * pci_find_ext_capability - Find an extended capability
 359 * @dev: PCI device to query
 360 * @cap: capability code
 361 *
 362 * Returns the address of the requested extended capability structure
 363 * within the device's PCI configuration space or 0 if the device does
 364 * not support it.  Possible values for @cap:
 365 *
 366 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 367 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 368 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 369 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 370 */
 371int pci_find_ext_capability(struct pci_dev *dev, int cap)
 372{
 373	return pci_find_next_ext_capability(dev, 0, cap);
 374}
 375EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 376
 377static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 378{
 379	int rc, ttl = PCI_FIND_CAP_TTL;
 380	u8 cap, mask;
 381
 382	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 383		mask = HT_3BIT_CAP_MASK;
 384	else
 385		mask = HT_5BIT_CAP_MASK;
 386
 387	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 388				      PCI_CAP_ID_HT, &ttl);
 389	while (pos) {
 390		rc = pci_read_config_byte(dev, pos + 3, &cap);
 391		if (rc != PCIBIOS_SUCCESSFUL)
 392			return 0;
 393
 394		if ((cap & mask) == ht_cap)
 395			return pos;
 396
 397		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 398					      pos + PCI_CAP_LIST_NEXT,
 399					      PCI_CAP_ID_HT, &ttl);
 400	}
 401
 402	return 0;
 403}
 404/**
 405 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 406 * @dev: PCI device to query
 407 * @pos: Position from which to continue searching
 408 * @ht_cap: Hypertransport capability code
 409 *
 410 * To be used in conjunction with pci_find_ht_capability() to search for
 411 * all capabilities matching @ht_cap. @pos should always be a value returned
 412 * from pci_find_ht_capability().
 413 *
 414 * NB. To be 100% safe against broken PCI devices, the caller should take
 415 * steps to avoid an infinite loop.
 416 */
 417int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 418{
 419	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 420}
 421EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 422
 423/**
 424 * pci_find_ht_capability - query a device's Hypertransport capabilities
 425 * @dev: PCI device to query
 426 * @ht_cap: Hypertransport capability code
 427 *
 428 * Tell if a device supports a given Hypertransport capability.
 429 * Returns an address within the device's PCI configuration space
  430 * or 0 in case the device does not support the requested capability.
 431 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 432 * which has a Hypertransport capability matching @ht_cap.
 433 */
 434int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 435{
 436	int pos;
 437
 438	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 439	if (pos)
 440		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 441
 442	return pos;
 443}
 444EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 445
 446/**
 447 * pci_find_parent_resource - return resource region of parent bus of given region
 448 * @dev: PCI device structure contains resources to be searched
 449 * @res: child resource record for which parent is sought
 450 *
  451 *  For a given resource region of a given device, return the resource
  452 *  region of the parent bus in which the given region is contained.
 453 */
 454struct resource *pci_find_parent_resource(const struct pci_dev *dev,
 455					  struct resource *res)
 456{
 457	const struct pci_bus *bus = dev->bus;
 458	struct resource *r;
 459	int i;
 460
 461	pci_bus_for_each_resource(bus, r, i) {
 462		if (!r)
 463			continue;
 464		if (resource_contains(r, res)) {
 465
 466			/*
 467			 * If the window is prefetchable but the BAR is
 468			 * not, the allocator made a mistake.
 469			 */
 470			if (r->flags & IORESOURCE_PREFETCH &&
 471			    !(res->flags & IORESOURCE_PREFETCH))
 472				return NULL;
 473
 474			/*
 475			 * If we're below a transparent bridge, there may
 476			 * be both a positively-decoded aperture and a
 477			 * subtractively-decoded region that contain the BAR.
 478			 * We want the positively-decoded one, so this depends
 479			 * on pci_bus_for_each_resource() giving us those
 480			 * first.
 481			 */
 482			return r;
 483		}
 484	}
 485	return NULL;
 486}
 487EXPORT_SYMBOL(pci_find_parent_resource);
 488
 489/**
 490 * pci_find_resource - Return matching PCI device resource
 491 * @dev: PCI device to query
 492 * @res: Resource to look for
 493 *
 494 * Goes over standard PCI resources (BARs) and checks if the given resource
 495 * is partially or fully contained in any of them. In that case the
 496 * matching resource is returned, %NULL otherwise.
 497 */
 498struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
 499{
 500	int i;
 501
 502	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
 503		struct resource *r = &dev->resource[i];
 504
 505		if (r->start && resource_contains(r, res))
 506			return r;
 507	}
 508
 509	return NULL;
 510}
 511EXPORT_SYMBOL(pci_find_resource);
 512
 513/**
 514 * pci_find_pcie_root_port - return PCIe Root Port
 515 * @dev: PCI device to query
 516 *
 517 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 518 * for a given PCI Device.
 519 */
 520struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
 521{
 522	struct pci_dev *bridge, *highest_pcie_bridge = dev;
 523
 524	bridge = pci_upstream_bridge(dev);
 525	while (bridge && pci_is_pcie(bridge)) {
 526		highest_pcie_bridge = bridge;
 527		bridge = pci_upstream_bridge(bridge);
 528	}
 529
 530	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
 531		return NULL;
 532
 533	return highest_pcie_bridge;
 534}
 535EXPORT_SYMBOL(pci_find_pcie_root_port);
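/*
 * Editor's example (not in the original source): a sketch of finding the
 * Root Port above a device, e.g. to inspect its link status.
 *
 *	struct pci_dev *rp = pci_find_pcie_root_port(pdev);
 *	u16 lnksta;
 *
 *	if (rp)
 *		pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
 */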
 536
 537/**
 538 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 539 * @dev: the PCI device to operate on
 540 * @pos: config space offset of status word
 541 * @mask: mask of bit(s) to care about in status word
 542 *
  543 * Return 1 when the mask bit(s) in the status word are clear, 0 otherwise.
 544 */
 545int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
 546{
 547	int i;
 548
  549	/* Wait for the Transaction Pending bit to clear */
 550	for (i = 0; i < 4; i++) {
 551		u16 status;
 552		if (i)
 553			msleep((1 << (i - 1)) * 100);
 554
 555		pci_read_config_word(dev, pos, &status);
 556		if (!(status & mask))
 557			return 1;
 558	}
 559
 560	return 0;
 561}
 562
 563/**
 564 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 565 * @dev: PCI device to have its BARs restored
 566 *
 567 * Restore the BAR values for a given device, so as to make it
 568 * accessible by its driver.
 569 */
 570static void pci_restore_bars(struct pci_dev *dev)
 571{
 572	int i;
 573
 574	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 575		pci_update_resource(dev, i);
 576}
 577
 578static const struct pci_platform_pm_ops *pci_platform_pm;
 579
 580int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
 581{
 582	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
 583	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
 584		return -EINVAL;
 585	pci_platform_pm = ops;
 586	return 0;
 587}
 588
 589static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 590{
 591	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 592}
 593
 594static inline int platform_pci_set_power_state(struct pci_dev *dev,
 595					       pci_power_t t)
 596{
 597	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 598}
 599
 600static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
 601{
 602	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
 603}
 604
 605static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 606{
 607	return pci_platform_pm ?
 608			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 609}
 610
 611static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
 612{
 613	return pci_platform_pm ?
 614			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
 615}
 616
 617static inline bool platform_pci_need_resume(struct pci_dev *dev)
 618{
 619	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
 620}
 621
 622/**
 623 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 624 *                           given PCI device
 625 * @dev: PCI device to handle.
 626 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 627 *
 628 * RETURN VALUE:
 629 * -EINVAL if the requested state is invalid.
 630 * -EIO if device does not support PCI PM or its PM capabilities register has a
 631 * wrong version, or device doesn't support the requested state.
 632 * 0 if device already is in the requested state.
 633 * 0 if device's power state has been successfully changed.
 634 */
 635static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 636{
 637	u16 pmcsr;
 638	bool need_restore = false;
 639
 640	/* Check if we're already there */
 641	if (dev->current_state == state)
 642		return 0;
 643
 644	if (!dev->pm_cap)
 645		return -EIO;
 646
 647	if (state < PCI_D0 || state > PCI_D3hot)
 648		return -EINVAL;
 649
 650	/* Validate current state:
  651	 * Can enter D0 from any state, but we can only go deeper
  652	 * into sleep if we're already in a low-power state
 653	 */
 654	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 655	    && dev->current_state > state) {
 656		pci_err(dev, "invalid power transition (from state %d to %d)\n",
 657			dev->current_state, state);
 658		return -EINVAL;
 659	}
 660
 661	/* check if this device supports the desired state */
 662	if ((state == PCI_D1 && !dev->d1_support)
 663	   || (state == PCI_D2 && !dev->d2_support))
 664		return -EIO;
 665
 666	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 667
 668	/* If we're (effectively) in D3, force entire word to 0.
 669	 * This doesn't affect PME_Status, disables PME_En, and
 670	 * sets PowerState to 0.
 671	 */
 672	switch (dev->current_state) {
 673	case PCI_D0:
 674	case PCI_D1:
 675	case PCI_D2:
 676		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 677		pmcsr |= state;
 678		break;
 679	case PCI_D3hot:
 680	case PCI_D3cold:
 681	case PCI_UNKNOWN: /* Boot-up */
 682		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 683		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 684			need_restore = true;
 685		/* Fall-through: force to D0 */
 686	default:
 687		pmcsr = 0;
 688		break;
 689	}
 690
 691	/* enter specified state */
 692	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 693
 694	/* Mandatory power management transition delays */
 695	/* see PCI PM 1.1 5.6.1 table 18 */
 696	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 697		pci_dev_d3_sleep(dev);
 698	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 699		udelay(PCI_PM_D2_DELAY);
 700
 701	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 702	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 703	if (dev->current_state != state && printk_ratelimit())
 704		pci_info(dev, "Refused to change power state, currently in D%d\n",
 705			 dev->current_state);
 706
 707	/*
 708	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 709	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 710	 * from D3hot to D0 _may_ perform an internal reset, thereby
 711	 * going to "D0 Uninitialized" rather than "D0 Initialized".
 712	 * For example, at least some versions of the 3c905B and the
 713	 * 3c556B exhibit this behaviour.
 714	 *
 715	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
 716	 * devices in a D3hot state at boot.  Consequently, we need to
 717	 * restore at least the BARs so that the device will be
 718	 * accessible to its driver.
 719	 */
 720	if (need_restore)
 721		pci_restore_bars(dev);
 722
 723	if (dev->bus->self)
 724		pcie_aspm_pm_state_change(dev->bus->self);
 725
 726	return 0;
 727}
 728
 729/**
 730 * pci_update_current_state - Read power state of given device and cache it
 731 * @dev: PCI device to handle.
 732 * @state: State to cache in case the device doesn't have the PM capability
 733 *
 734 * The power state is read from the PMCSR register, which however is
 735 * inaccessible in D3cold.  The platform firmware is therefore queried first
 736 * to detect accessibility of the register.  In case the platform firmware
 737 * reports an incorrect state or the device isn't power manageable by the
 738 * platform at all, we try to detect D3cold by testing accessibility of the
 739 * vendor ID in config space.
 740 */
 741void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 742{
 743	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
 744	    !pci_device_is_present(dev)) {
 745		dev->current_state = PCI_D3cold;
 746	} else if (dev->pm_cap) {
 747		u16 pmcsr;
 748
 749		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 750		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 751	} else {
 752		dev->current_state = state;
 753	}
 754}
 755
 756/**
 757 * pci_power_up - Put the given device into D0 forcibly
 758 * @dev: PCI device to power up
 759 */
 760void pci_power_up(struct pci_dev *dev)
 761{
 762	if (platform_pci_power_manageable(dev))
 763		platform_pci_set_power_state(dev, PCI_D0);
 764
 765	pci_raw_set_power_state(dev, PCI_D0);
 766	pci_update_current_state(dev, PCI_D0);
 767}
 768
 769/**
 770 * pci_platform_power_transition - Use platform to change device power state
 771 * @dev: PCI device to handle.
 772 * @state: State to put the device into.
 773 */
 774static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 775{
 776	int error;
 777
 778	if (platform_pci_power_manageable(dev)) {
 779		error = platform_pci_set_power_state(dev, state);
 780		if (!error)
 781			pci_update_current_state(dev, state);
 782	} else
 783		error = -ENODEV;
 784
 785	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
 786		dev->current_state = PCI_D0;
 787
 788	return error;
 789}
 790
 791/**
 792 * pci_wakeup - Wake up a PCI device
 793 * @pci_dev: Device to handle.
 794 * @ign: ignored parameter
 795 */
 796static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
 797{
 798	pci_wakeup_event(pci_dev);
 799	pm_request_resume(&pci_dev->dev);
 800	return 0;
 801}
 802
 803/**
 804 * pci_wakeup_bus - Walk given bus and wake up devices on it
 805 * @bus: Top bus of the subtree to walk.
 806 */
 807void pci_wakeup_bus(struct pci_bus *bus)
 808{
 809	if (bus)
 810		pci_walk_bus(bus, pci_wakeup, NULL);
 811}
 812
 813/**
 814 * __pci_start_power_transition - Start power transition of a PCI device
 815 * @dev: PCI device to handle.
 816 * @state: State to put the device into.
 817 */
 818static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 819{
 820	if (state == PCI_D0) {
 821		pci_platform_power_transition(dev, PCI_D0);
 822		/*
 823		 * Mandatory power management transition delays, see
 824		 * PCI Express Base Specification Revision 2.0 Section
 825		 * 6.6.1: Conventional Reset.  Do not delay for
  826		 * devices powered on/off by the corresponding bridge,
  827		 * because we have already delayed for the bridge.
 828		 */
 829		if (dev->runtime_d3cold) {
 830			if (dev->d3cold_delay)
 831				msleep(dev->d3cold_delay);
 832			/*
 833			 * When powering on a bridge from D3cold, the
 834			 * whole hierarchy may be powered on into
  835			 * D0uninitialized state; resume the devices to give
  836			 * them a chance to suspend again
 837			 */
 838			pci_wakeup_bus(dev->subordinate);
 839		}
 840	}
 841}
 842
 843/**
 844 * __pci_dev_set_current_state - Set current state of a PCI device
 845 * @dev: Device to handle
 846 * @data: pointer to state to be set
 847 */
 848static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
 849{
 850	pci_power_t state = *(pci_power_t *)data;
 851
 852	dev->current_state = state;
 853	return 0;
 854}
 855
 856/**
 857 * pci_bus_set_current_state - Walk given bus and set current state of devices
 858 * @bus: Top bus of the subtree to walk.
 859 * @state: state to be set
 860 */
 861void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
 862{
 863	if (bus)
 864		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
 865}
 866
 867/**
 868 * __pci_complete_power_transition - Complete power transition of a PCI device
 869 * @dev: PCI device to handle.
 870 * @state: State to put the device into.
 871 *
 872 * This function should not be called directly by device drivers.
 873 */
 874int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 875{
 876	int ret;
 877
 878	if (state <= PCI_D0)
 879		return -EINVAL;
 880	ret = pci_platform_power_transition(dev, state);
  881	/* Powering off the bridge may power off the whole hierarchy */
 882	if (!ret && state == PCI_D3cold)
 883		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
 884	return ret;
 885}
 886EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 887
 888/**
 889 * pci_set_power_state - Set the power state of a PCI device
 890 * @dev: PCI device to handle.
 891 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 892 *
 893 * Transition a device to a new power state, using the platform firmware and/or
 894 * the device's PCI PM registers.
 895 *
 896 * RETURN VALUE:
 897 * -EINVAL if the requested state is invalid.
 898 * -EIO if device does not support PCI PM or its PM capabilities register has a
 899 * wrong version, or device doesn't support the requested state.
 900 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 901 * 0 if device already is in the requested state.
 902 * 0 if the transition is to D3 but D3 is not supported.
 903 * 0 if device's power state has been successfully changed.
 904 */
 905int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 906{
 907	int error;
 908
 909	/* bound the state we're entering */
 910	if (state > PCI_D3cold)
 911		state = PCI_D3cold;
 912	else if (state < PCI_D0)
 913		state = PCI_D0;
 914	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
 915		/*
 916		 * If the device or the parent bridge do not support PCI PM,
 917		 * ignore the request if we're doing anything other than putting
 918		 * it into D0 (which would only happen on boot).
 919		 */
 920		return 0;
 921
 922	/* Check if we're already there */
 923	if (dev->current_state == state)
 924		return 0;
 925
 926	__pci_start_power_transition(dev, state);
 927
 928	/* This device is quirked not to be put into D3, so
 929	   don't put it in D3 */
 930	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 931		return 0;
 932
 933	/*
  934	 * To put the device in D3cold, we put it into D3hot in the native
  935	 * way, then put it into D3cold using platform ops
 936	 */
 937	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
 938					PCI_D3hot : state);
 939
 940	if (!__pci_complete_power_transition(dev, state))
 941		error = 0;
 942
 943	return error;
 944}
 945EXPORT_SYMBOL(pci_set_power_state);
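/*
 * Editor's example (not in the original source): a typical suspend/resume
 * pair brackets pci_set_power_state() with a state save and restore,
 * roughly as sketched:
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 *	...
 *	pci_set_power_state(pdev, PCI_D0);
 *	pci_restore_state(pdev);
 */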
 946
 947/**
 948 * pci_choose_state - Choose the power state of a PCI device
 949 * @dev: PCI device to be suspended
 950 * @state: target sleep state for the whole system. This is the value
 951 *	that is passed to suspend() function.
 952 *
 953 * Returns PCI power state suitable for given device and given system
 954 * message.
 955 */
 956
 957pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 958{
 959	pci_power_t ret;
 960
 961	if (!dev->pm_cap)
 962		return PCI_D0;
 963
 964	ret = platform_pci_choose_state(dev);
 965	if (ret != PCI_POWER_ERROR)
 966		return ret;
 967
 968	switch (state.event) {
 969	case PM_EVENT_ON:
 970		return PCI_D0;
 971	case PM_EVENT_FREEZE:
 972	case PM_EVENT_PRETHAW:
 973		/* REVISIT both freeze and pre-thaw "should" use D0 */
 974	case PM_EVENT_SUSPEND:
 975	case PM_EVENT_HIBERNATE:
 976		return PCI_D3hot;
 977	default:
 978		pci_info(dev, "unrecognized suspend event %d\n",
 979			 state.event);
 980		BUG();
 981	}
 982	return PCI_D0;
 983}
 984EXPORT_SYMBOL(pci_choose_state);
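/*
 * Editor's example (not in the original source): a sketch of a legacy
 * .suspend hook combining pci_choose_state() with the helpers above;
 * "my_suspend" is a hypothetical name.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *		return 0;
 *	}
 */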
 985
 986#define PCI_EXP_SAVE_REGS	7
 987
 988static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
 989						       u16 cap, bool extended)
 990{
 991	struct pci_cap_saved_state *tmp;
 992
 993	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
 994		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
 995			return tmp;
 996	}
 997	return NULL;
 998}
 999
1000struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
1001{
1002	return _pci_find_saved_cap(dev, cap, false);
1003}
1004
1005struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
1006{
1007	return _pci_find_saved_cap(dev, cap, true);
1008}
1009
1010static int pci_save_pcie_state(struct pci_dev *dev)
1011{
1012	int i = 0;
1013	struct pci_cap_saved_state *save_state;
1014	u16 *cap;
1015
1016	if (!pci_is_pcie(dev))
1017		return 0;
1018
1019	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1020	if (!save_state) {
1021		pci_err(dev, "buffer not found in %s\n", __func__);
1022		return -ENOMEM;
1023	}
1024
1025	cap = (u16 *)&save_state->cap.data[0];
1026	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
1027	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
1028	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
1029	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
1030	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
1031	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
1032	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);
1033
1034	return 0;
1035}
1036
1037static void pci_restore_pcie_state(struct pci_dev *dev)
1038{
1039	int i = 0;
1040	struct pci_cap_saved_state *save_state;
1041	u16 *cap;
1042
1043	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
1044	if (!save_state)
1045		return;
1046
1047	cap = (u16 *)&save_state->cap.data[0];
1048	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
1049	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
1050	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
1051	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
1052	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
1053	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
1054	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
1055}
1056
1057
1058static int pci_save_pcix_state(struct pci_dev *dev)
1059{
1060	int pos;
1061	struct pci_cap_saved_state *save_state;
1062
1063	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1064	if (!pos)
1065		return 0;
1066
1067	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1068	if (!save_state) {
1069		pci_err(dev, "buffer not found in %s\n", __func__);
1070		return -ENOMEM;
1071	}
1072
1073	pci_read_config_word(dev, pos + PCI_X_CMD,
1074			     (u16 *)save_state->cap.data);
1075
1076	return 0;
1077}
1078
1079static void pci_restore_pcix_state(struct pci_dev *dev)
1080{
1081	int i = 0, pos;
1082	struct pci_cap_saved_state *save_state;
1083	u16 *cap;
1084
1085	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
1086	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1087	if (!save_state || !pos)
1088		return;
1089	cap = (u16 *)&save_state->cap.data[0];
1090
1091	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
1092}
1093
1094
1095/**
1096 * pci_save_state - save the PCI configuration space of a device before suspending
1097 * @dev: - PCI device that we're dealing with
1098 */
1099int pci_save_state(struct pci_dev *dev)
1100{
1101	int i;
1102	/* XXX: 100% dword access ok here? */
1103	for (i = 0; i < 16; i++)
1104		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
1105	dev->state_saved = true;
1106
1107	i = pci_save_pcie_state(dev);
1108	if (i != 0)
1109		return i;
1110
1111	i = pci_save_pcix_state(dev);
1112	if (i != 0)
1113		return i;
1114
1115	return pci_save_vc_state(dev);
1116}
1117EXPORT_SYMBOL(pci_save_state);
1118
1119static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
1120				     u32 saved_val, int retry)
1121{
1122	u32 val;
1123
1124	pci_read_config_dword(pdev, offset, &val);
1125	if (val == saved_val)
1126		return;
1127
1128	for (;;) {
1129		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
1130			offset, val, saved_val);
1131		pci_write_config_dword(pdev, offset, saved_val);
1132		if (retry-- <= 0)
1133			return;
1134
1135		pci_read_config_dword(pdev, offset, &val);
1136		if (val == saved_val)
1137			return;
1138
1139		mdelay(1);
1140	}
1141}
1142
1143static void pci_restore_config_space_range(struct pci_dev *pdev,
1144					   int start, int end, int retry)
1145{
1146	int index;
1147
1148	for (index = end; index >= start; index--)
1149		pci_restore_config_dword(pdev, 4 * index,
1150					 pdev->saved_config_space[index],
1151					 retry);
1152}
1153
1154static void pci_restore_config_space(struct pci_dev *pdev)
1155{
1156	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1157		pci_restore_config_space_range(pdev, 10, 15, 0);
1158		/* Restore BARs before the command register. */
1159		pci_restore_config_space_range(pdev, 4, 9, 10);
1160		pci_restore_config_space_range(pdev, 0, 3, 0);
1161	} else {
1162		pci_restore_config_space_range(pdev, 0, 15, 0);
1163	}
1164}
1165
1166/**
1167 * pci_restore_state - Restore the saved state of a PCI device
1168 * @dev: - PCI device that we're dealing with
1169 */
1170void pci_restore_state(struct pci_dev *dev)
1171{
1172	if (!dev->state_saved)
1173		return;
1174
1175	/* PCI Express register must be restored first */
1176	pci_restore_pcie_state(dev);
1177	pci_restore_pasid_state(dev);
1178	pci_restore_pri_state(dev);
1179	pci_restore_ats_state(dev);
1180	pci_restore_vc_state(dev);
1181
1182	pci_cleanup_aer_error_status_regs(dev);
1183
1184	pci_restore_config_space(dev);
1185
1186	pci_restore_pcix_state(dev);
1187	pci_restore_msi_state(dev);
1188
1189	/* Restore ACS and IOV configuration state */
1190	pci_enable_acs(dev);
1191	pci_restore_iov_state(dev);
1192
1193	dev->state_saved = false;
1194}
1195EXPORT_SYMBOL(pci_restore_state);
1196
1197struct pci_saved_state {
1198	u32 config_space[16];
1199	struct pci_cap_saved_data cap[0];
1200};
1201
1202/**
1203 * pci_store_saved_state - Allocate and return an opaque struct containing
1204 *			   the device saved state.
1205 * @dev: PCI device that we're dealing with
1206 *
1207 * Return NULL if no state or error.
1208 */
1209struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
1210{
1211	struct pci_saved_state *state;
1212	struct pci_cap_saved_state *tmp;
1213	struct pci_cap_saved_data *cap;
1214	size_t size;
1215
1216	if (!dev->state_saved)
1217		return NULL;
1218
1219	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1220
1221	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
1222		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1223
1224	state = kzalloc(size, GFP_KERNEL);
1225	if (!state)
1226		return NULL;
1227
1228	memcpy(state->config_space, dev->saved_config_space,
1229	       sizeof(state->config_space));
1230
1231	cap = state->cap;
1232	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
1233		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1234		memcpy(cap, &tmp->cap, len);
1235		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1236	}
1237	/* Empty cap_save terminates list */
1238
1239	return state;
1240}
1241EXPORT_SYMBOL_GPL(pci_store_saved_state);
1242
1243/**
1244 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1245 * @dev: PCI device that we're dealing with
1246 * @state: Saved state returned from pci_store_saved_state()
1247 */
1248int pci_load_saved_state(struct pci_dev *dev,
1249			 struct pci_saved_state *state)
1250{
1251	struct pci_cap_saved_data *cap;
1252
1253	dev->state_saved = false;
1254
1255	if (!state)
1256		return 0;
1257
1258	memcpy(dev->saved_config_space, state->config_space,
1259	       sizeof(state->config_space));
1260
1261	cap = state->cap;
1262	while (cap->size) {
1263		struct pci_cap_saved_state *tmp;
1264
1265		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
1266		if (!tmp || tmp->cap.size != cap->size)
1267			return -EINVAL;
1268
1269		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1270		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1271		       sizeof(struct pci_cap_saved_data) + cap->size);
1272	}
1273
1274	dev->state_saved = true;
1275	return 0;
1276}
1277EXPORT_SYMBOL_GPL(pci_load_saved_state);
1278
1279/**
1280 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1281 *				   and free the memory allocated for it.
1282 * @dev: PCI device that we're dealing with
1283 * @state: Pointer to saved state returned from pci_store_saved_state()
1284 */
1285int pci_load_and_free_saved_state(struct pci_dev *dev,
1286				  struct pci_saved_state **state)
1287{
1288	int ret = pci_load_saved_state(dev, *state);
1289	kfree(*state);
1290	*state = NULL;
1291	return ret;
1292}
1293EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
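/*
 * Editor's example (not in the original source): a sketch of snapshotting
 * config space with pci_store_saved_state() and replaying it after a
 * device reset.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	...reset the device...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */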
1294
1295int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
1296{
1297	return pci_enable_resources(dev, bars);
1298}
1299
1300static int do_pci_enable_device(struct pci_dev *dev, int bars)
1301{
1302	int err;
1303	struct pci_dev *bridge;
1304	u16 cmd;
1305	u8 pin;
1306
1307	err = pci_set_power_state(dev, PCI_D0);
1308	if (err < 0 && err != -EIO)
1309		return err;
1310
1311	bridge = pci_upstream_bridge(dev);
1312	if (bridge)
1313		pcie_aspm_powersave_config_link(bridge);
1314
1315	err = pcibios_enable_device(dev, bars);
1316	if (err < 0)
1317		return err;
1318	pci_fixup_device(pci_fixup_enable, dev);
1319
1320	if (dev->msi_enabled || dev->msix_enabled)
1321		return 0;
1322
1323	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
1324	if (pin) {
1325		pci_read_config_word(dev, PCI_COMMAND, &cmd);
1326		if (cmd & PCI_COMMAND_INTX_DISABLE)
1327			pci_write_config_word(dev, PCI_COMMAND,
1328					      cmd & ~PCI_COMMAND_INTX_DISABLE);
1329	}
1330
1331	return 0;
1332}
1333
1334/**
1335 * pci_reenable_device - Resume abandoned device
1336 * @dev: PCI device to be resumed
1337 *
1338 *  Note this function is a backend of pci_default_resume and is not supposed
 1339 *  to be called by normal code; write a proper resume handler and use it instead.
1340 */
1341int pci_reenable_device(struct pci_dev *dev)
1342{
1343	if (pci_is_enabled(dev))
1344		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1345	return 0;
1346}
1347EXPORT_SYMBOL(pci_reenable_device);
1348
1349static void pci_enable_bridge(struct pci_dev *dev)
1350{
1351	struct pci_dev *bridge;
1352	int retval;
1353
1354	bridge = pci_upstream_bridge(dev);
1355	if (bridge)
1356		pci_enable_bridge(bridge);
1357
1358	if (pci_is_enabled(dev)) {
1359		if (!dev->is_busmaster)
1360			pci_set_master(dev);
1361		return;
1362	}
1363
1364	retval = pci_enable_device(dev);
1365	if (retval)
1366		pci_err(dev, "Error enabling bridge (%d), continuing\n",
1367			retval);
1368	pci_set_master(dev);
1369}
1370
1371static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
1372{
1373	struct pci_dev *bridge;
1374	int err;
1375	int i, bars = 0;
1376
1377	/*
1378	 * Power state could be unknown at this point, either due to a fresh
1379	 * boot or a device removal call.  So get the current power state
1380	 * so that things like MSI message writing will behave as expected
1381	 * (e.g. if the device really is in D0 at enable time).
1382	 */
1383	if (dev->pm_cap) {
1384		u16 pmcsr;
1385		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1386		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1387	}
1388
1389	if (atomic_inc_return(&dev->enable_cnt) > 1)
1390		return 0;		/* already enabled */
1391
1392	bridge = pci_upstream_bridge(dev);
1393	if (bridge)
1394		pci_enable_bridge(bridge);
1395
 1396	/* only skip SR-IOV related resources */
1397	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
1398		if (dev->resource[i].flags & flags)
1399			bars |= (1 << i);
1400	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
1401		if (dev->resource[i].flags & flags)
1402			bars |= (1 << i);
1403
1404	err = do_pci_enable_device(dev, bars);
1405	if (err < 0)
1406		atomic_dec(&dev->enable_cnt);
1407	return err;
1408}
1409
1410/**
1411 * pci_enable_device_io - Initialize a device for use with IO space
1412 * @dev: PCI device to be initialized
1413 *
1414 *  Initialize device before it's used by a driver. Ask low-level code
1415 *  to enable I/O resources. Wake up the device if it was suspended.
1416 *  Beware, this function can fail.
1417 */
1418int pci_enable_device_io(struct pci_dev *dev)
1419{
1420	return pci_enable_device_flags(dev, IORESOURCE_IO);
1421}
1422EXPORT_SYMBOL(pci_enable_device_io);
1423
1424/**
1425 * pci_enable_device_mem - Initialize a device for use with Memory space
1426 * @dev: PCI device to be initialized
1427 *
1428 *  Initialize device before it's used by a driver. Ask low-level code
1429 *  to enable Memory resources. Wake up the device if it was suspended.
1430 *  Beware, this function can fail.
1431 */
1432int pci_enable_device_mem(struct pci_dev *dev)
1433{
1434	return pci_enable_device_flags(dev, IORESOURCE_MEM);
1435}
1436EXPORT_SYMBOL(pci_enable_device_mem);
1437
1438/**
1439 * pci_enable_device - Initialize device before it's used by a driver.
1440 * @dev: PCI device to be initialized
1441 *
1442 *  Initialize device before it's used by a driver. Ask low-level code
1443 *  to enable I/O and memory. Wake up the device if it was suspended.
1444 *  Beware, this function can fail.
1445 *
1446 *  Note we don't actually enable the device many times if we call
1447 *  this function repeatedly (we just increment the count).
1448 */
1449int pci_enable_device(struct pci_dev *dev)
1450{
1451	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1452}
1453EXPORT_SYMBOL(pci_enable_device);
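/*
 * Editor's example (not in the original source): the classic unmanaged
 * probe sequence built on the functions above; error handling trimmed,
 * and "mydrv" is a hypothetical name.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	err = pci_request_regions(pdev, "mydrv");
 *	if (err)
 *		goto err_disable;
 *	pci_set_master(pdev);
 */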
1454
1455/*
1456 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1457 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1458 * there's no need to track it separately.  pci_devres is initialized
1459 * when a device is enabled using managed PCI device enable interface.
1460 */
1461struct pci_devres {
1462	unsigned int enabled:1;
1463	unsigned int pinned:1;
1464	unsigned int orig_intx:1;
1465	unsigned int restore_intx:1;
1466	unsigned int mwi:1;
1467	u32 region_mask;
1468};
1469
1470static void pcim_release(struct device *gendev, void *res)
1471{
1472	struct pci_dev *dev = to_pci_dev(gendev);
1473	struct pci_devres *this = res;
1474	int i;
1475
1476	if (dev->msi_enabled)
1477		pci_disable_msi(dev);
1478	if (dev->msix_enabled)
1479		pci_disable_msix(dev);
1480
1481	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1482		if (this->region_mask & (1 << i))
1483			pci_release_region(dev, i);
1484
1485	if (this->mwi)
1486		pci_clear_mwi(dev);
1487
1488	if (this->restore_intx)
1489		pci_intx(dev, this->orig_intx);
1490
1491	if (this->enabled && !this->pinned)
1492		pci_disable_device(dev);
1493}
1494
1495static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
1496{
1497	struct pci_devres *dr, *new_dr;
1498
1499	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1500	if (dr)
1501		return dr;
1502
1503	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1504	if (!new_dr)
1505		return NULL;
1506	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1507}
1508
1509static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
1510{
1511	if (pci_is_managed(pdev))
1512		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1513	return NULL;
1514}
1515
1516/**
1517 * pcim_enable_device - Managed pci_enable_device()
1518 * @pdev: PCI device to be initialized
1519 *
1520 * Managed pci_enable_device().
1521 */
1522int pcim_enable_device(struct pci_dev *pdev)
1523{
1524	struct pci_devres *dr;
1525	int rc;
1526
1527	dr = get_pci_dr(pdev);
1528	if (unlikely(!dr))
1529		return -ENOMEM;
1530	if (dr->enabled)
1531		return 0;
1532
1533	rc = pci_enable_device(pdev);
1534	if (!rc) {
1535		pdev->is_managed = 1;
1536		dr->enabled = 1;
1537	}
1538	return rc;
1539}
1540EXPORT_SYMBOL(pcim_enable_device);
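/*
 * Editor's example (not in the original source): with the managed variant
 * no matching pci_disable_device() is needed in remove(); "my_probe" is a
 * hypothetical name.
 *
 *	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		pci_set_master(pdev);
 *		return 0;
 *	}
 */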
1541
1542/**
1543 * pcim_pin_device - Pin managed PCI device
1544 * @pdev: PCI device to pin
1545 *
1546 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1547 * driver detach.  @pdev must have been enabled with
1548 * pcim_enable_device().
1549 */
1550void pcim_pin_device(struct pci_dev *pdev)
1551{
1552	struct pci_devres *dr;
1553
1554	dr = find_pci_dr(pdev);
1555	WARN_ON(!dr || !dr->enabled);
1556	if (dr)
1557		dr->pinned = 1;
1558}
1559EXPORT_SYMBOL(pcim_pin_device);
1560
1561/*
1562 * pcibios_add_device - provide arch specific hooks when adding device dev
1563 * @dev: the PCI device being added
1564 *
1565 * Permits the platform to provide architecture specific functionality when
1566 * devices are added. This is the default implementation. Architecture
1567 * implementations can override this.
1568 */
1569int __weak pcibios_add_device(struct pci_dev *dev)
1570{
1571	return 0;
1572}
1573
1574/**
1575 * pcibios_release_device - provide arch specific hooks when releasing device dev
1576 * @dev: the PCI device being released
1577 *
1578 * Permits the platform to provide architecture specific functionality when
1579 * devices are released. This is the default implementation. Architecture
1580 * implementations can override this.
1581 */
1582void __weak pcibios_release_device(struct pci_dev *dev) {}
1583
1584/**
1585 * pcibios_disable_device - disable arch specific PCI resources for device dev
1586 * @dev: the PCI device to disable
1587 *
1588 * Disables architecture specific PCI resources for the device. This
1589 * is the default implementation. Architecture implementations can
1590 * override this.
1591 */
1592void __weak pcibios_disable_device(struct pci_dev *dev) {}
1593
1594/**
1595 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1596 * @irq: ISA IRQ to penalize
1597 * @active: IRQ active or not
1598 *
1599 * Permits the platform to provide architecture-specific functionality when
1600 * penalizing ISA IRQs. This is the default implementation. Architecture
1601 * implementations can override this.
1602 */
1603void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1604
1605static void do_pci_disable_device(struct pci_dev *dev)
1606{
1607	u16 pci_command;
1608
1609	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1610	if (pci_command & PCI_COMMAND_MASTER) {
1611		pci_command &= ~PCI_COMMAND_MASTER;
1612		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1613	}
1614
1615	pcibios_disable_device(dev);
1616}
1617
1618/**
1619 * pci_disable_enabled_device - Disable device without updating enable_cnt
1620 * @dev: PCI device to disable
1621 *
1622 * NOTE: This function is a backend of PCI power management routines and is
 1623 * not supposed to be called by drivers.
1624 */
1625void pci_disable_enabled_device(struct pci_dev *dev)
1626{
1627	if (pci_is_enabled(dev))
1628		do_pci_disable_device(dev);
1629}
1630
1631/**
1632 * pci_disable_device - Disable PCI device after use
1633 * @dev: PCI device to be disabled
1634 *
1635 * Signal to the system that the PCI device is not in use by the system
1636 * anymore.  This only involves disabling PCI bus-mastering, if active.
1637 *
1638 * Note we don't actually disable the device until all callers of
1639 * pci_enable_device() have called pci_disable_device().
1640 */
1641void pci_disable_device(struct pci_dev *dev)
1642{
1643	struct pci_devres *dr;
1644
1645	dr = find_pci_dr(dev);
1646	if (dr)
1647		dr->enabled = 0;
1648
1649	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1650		      "disabling already-disabled device");
1651
1652	if (atomic_dec_return(&dev->enable_cnt) != 0)
1653		return;
1654
1655	do_pci_disable_device(dev);
1656
1657	dev->is_busmaster = 0;
1658}
1659EXPORT_SYMBOL(pci_disable_device);
1660
1661/**
1662 * pcibios_set_pcie_reset_state - set reset state for device dev
1663 * @dev: the PCIe device reset
1664 * @state: Reset state to enter into
1665 *
1666 *
1667 * Sets the PCIe reset state for the device. This is the default
1668 * implementation. Architecture implementations can override this.
1669 */
1670int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1671					enum pcie_reset_state state)
1672{
1673	return -EINVAL;
1674}
1675
1676/**
1677 * pci_set_pcie_reset_state - set reset state for device dev
1678 * @dev: the PCIe device reset
1679 * @state: Reset state to enter into
1680 *
1681 *
1682 * Sets the PCI reset state for the device.
1683 */
1684int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1685{
1686	return pcibios_set_pcie_reset_state(dev, state);
1687}
1688EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1689
1690/**
1691 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
1692 * @dev: PCIe root port or event collector.
1693 */
1694void pcie_clear_root_pme_status(struct pci_dev *dev)
1695{
1696	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
1697}
1698
1699/**
1700 * pci_check_pme_status - Check if given device has generated PME.
1701 * @dev: Device to check.
1702 *
1703 * Check the PME status of the device and if set, clear it and clear PME enable
1704 * (if set).  Return 'true' if PME status and PME enable were both set or
1705 * 'false' otherwise.
1706 */
1707bool pci_check_pme_status(struct pci_dev *dev)
1708{
1709	int pmcsr_pos;
1710	u16 pmcsr;
1711	bool ret = false;
1712
1713	if (!dev->pm_cap)
1714		return false;
1715
1716	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1717	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1718	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1719		return false;
1720
1721	/* Clear PME status. */
1722	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1723	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1724		/* Disable PME to avoid interrupt flood. */
1725		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1726		ret = true;
1727	}
1728
1729	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1730
1731	return ret;
1732}
1733
1734/**
1735 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1736 * @dev: Device to handle.
1737 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
1738 *
1739 * Check if @dev has generated PME and queue a resume request for it in that
1740 * case.
1741 */
1742static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
1743{
1744	if (pme_poll_reset && dev->pme_poll)
1745		dev->pme_poll = false;
1746
1747	if (pci_check_pme_status(dev)) {
1748		pci_wakeup_event(dev);
1749		pm_request_resume(&dev->dev);
1750	}
1751	return 0;
1752}
1753
1754/**
1755 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1756 * @bus: Top bus of the subtree to walk.
1757 */
1758void pci_pme_wakeup_bus(struct pci_bus *bus)
1759{
1760	if (bus)
1761		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
1762}
1763
1764
1765/**
1766 * pci_pme_capable - check the capability of PCI device to generate PME#
1767 * @dev: PCI device to handle.
1768 * @state: PCI state from which device will issue PME#.
1769 */
1770bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1771{
1772	if (!dev->pm_cap)
1773		return false;
1774
1775	return !!(dev->pme_support & (1 << state));
1776}
1777EXPORT_SYMBOL(pci_pme_capable);
1778
1779static void pci_pme_list_scan(struct work_struct *work)
1780{
1781	struct pci_pme_device *pme_dev, *n;
1782
1783	mutex_lock(&pci_pme_list_mutex);
1784	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1785		if (pme_dev->dev->pme_poll) {
1786			struct pci_dev *bridge;
1787
1788			bridge = pme_dev->dev->bus->self;
1789			/*
1790			 * If bridge is in low power state, the
1791			 * configuration space of subordinate devices
 1792			 * may not be accessible
1793			 */
1794			if (bridge && bridge->current_state != PCI_D0)
1795				continue;
1796			pci_pme_wakeup(pme_dev->dev, NULL);
1797		} else {
1798			list_del(&pme_dev->list);
1799			kfree(pme_dev);
1800		}
1801	}
1802	if (!list_empty(&pci_pme_list))
1803		queue_delayed_work(system_freezable_wq, &pci_pme_work,
1804				   msecs_to_jiffies(PME_TIMEOUT));
1805	mutex_unlock(&pci_pme_list_mutex);
1806}
1807
1808static void __pci_pme_active(struct pci_dev *dev, bool enable)
1809{
1810	u16 pmcsr;
1811
1812	if (!dev->pme_support)
1813		return;
1814
1815	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1816	/* Clear PME_Status by writing 1 to it and enable PME# */
1817	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1818	if (!enable)
1819		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1820
1821	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1822}
1823
1824/**
1825 * pci_pme_restore - Restore PME configuration after config space restore.
1826 * @dev: PCI device to update.
1827 */
1828void pci_pme_restore(struct pci_dev *dev)
1829{
1830	u16 pmcsr;
1831
1832	if (!dev->pme_support)
1833		return;
1834
1835	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1836	if (dev->wakeup_prepared) {
1837		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1838		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
1839	} else {
1840		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1841		pmcsr |= PCI_PM_CTRL_PME_STATUS;
1842	}
1843	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1844}
1845
1846/**
1847 * pci_pme_active - enable or disable PCI device's PME# function
1848 * @dev: PCI device to handle.
1849 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1850 *
1851 * The caller must verify that the device is capable of generating PME# before
1852 * calling this function with @enable equal to 'true'.
1853 */
1854void pci_pme_active(struct pci_dev *dev, bool enable)
1855{
1856	__pci_pme_active(dev, enable);
1857
1858	/*
1859	 * PCI (as opposed to PCIe) PME requires that the device have
1860	 * its PME# line hooked up correctly. Not all hardware vendors
1861	 * do this, so the PME never gets delivered and the device
1862	 * remains asleep. The easiest way around this is to
1863	 * periodically walk the list of suspended devices and check
1864	 * whether any have their PME flag set. The assumption is that
1865	 * we'll wake up often enough anyway that this won't be a huge
1866	 * hit, and the power savings from the devices will still be a
1867	 * win.
1868	 *
1869	 * Although PCIe uses an in-band PME message instead of the PME#
1870	 * line to report PME, PME does not work for some PCIe devices in
1871	 * reality.  For example, there are devices that set their PME
1872	 * status bits, but don't really bother to send a PME message;
1873	 * there are PCI Express Root Ports that don't bother to
1874	 * trigger interrupts when they receive PME messages from the
1875	 * devices below.  So PME poll is used for PCIe devices too.
1876	 */
1877
1878	if (dev->pme_poll) {
1879		struct pci_pme_device *pme_dev;
1880		if (enable) {
1881			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1882					  GFP_KERNEL);
1883			if (!pme_dev) {
1884				pci_warn(dev, "can't enable PME#\n");
1885				return;
1886			}
1887			pme_dev->dev = dev;
1888			mutex_lock(&pci_pme_list_mutex);
1889			list_add(&pme_dev->list, &pci_pme_list);
1890			if (list_is_singular(&pci_pme_list))
1891				queue_delayed_work(system_freezable_wq,
1892						   &pci_pme_work,
1893						   msecs_to_jiffies(PME_TIMEOUT));
1894			mutex_unlock(&pci_pme_list_mutex);
1895		} else {
1896			mutex_lock(&pci_pme_list_mutex);
1897			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1898				if (pme_dev->dev == dev) {
1899					list_del(&pme_dev->list);
1900					kfree(pme_dev);
1901					break;
1902				}
1903			}
1904			mutex_unlock(&pci_pme_list_mutex);
1905		}
1906	}
1907
1908	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
1909}
1910EXPORT_SYMBOL(pci_pme_active);
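
/*
 * Illustrative note, not part of the original file: per the kernel-doc
 * above, pci_pme_active() must be paired with a prior capability check,
 * e.g. (hypothetical "pdev"):
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 */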
1911
1912/**
1913 * __pci_enable_wake - enable PCI device as wakeup event source
1914 * @dev: PCI device affected
1915 * @state: PCI state from which device will issue wakeup events
1916 * @enable: True to enable event generation; false to disable
1917 *
1918 * This enables the device as a wakeup event source, or disables it.
1919 * When such events involve platform-specific hooks, those hooks are
1920 * called automatically by this routine.
1921 *
1922 * Devices with legacy power management (no standard PCI PM capabilities)
1923 * always require such platform hooks.
1924 *
1925 * RETURN VALUE:
1926 * 0 is returned on success
1927 * -EINVAL is returned if the device is not supposed to wake up the system
1928 * A platform-dependent error code is returned if both the platform and
1929 * the native mechanism fail to enable the generation of wake-up events
1930 */
1931static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1932{
1933	int ret = 0;
1934
1935	/*
1936	 * Bridges can only signal wakeup on behalf of subordinate devices,
1937	 * but that is set up elsewhere, so skip them.
1938	 */
1939	if (pci_has_subordinate(dev))
1940		return 0;
1941
1942	/* Don't do the same thing twice in a row for one device. */
1943	if (!!enable == !!dev->wakeup_prepared)
1944		return 0;
1945
1946	/*
1947	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1948	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1949	 * enable.  To disable wake-up we call the platform first, for symmetry.
1950	 */
1951
1952	if (enable) {
1953		int error;
1954
1955		if (pci_pme_capable(dev, state))
1956			pci_pme_active(dev, true);
1957		else
1958			ret = 1;
1959		error = platform_pci_set_wakeup(dev, true);
1960		if (ret)
1961			ret = error;
1962		if (!ret)
1963			dev->wakeup_prepared = true;
1964	} else {
1965		platform_pci_set_wakeup(dev, false);
1966		pci_pme_active(dev, false);
1967		dev->wakeup_prepared = false;
1968	}
1969
1970	return ret;
1971}
1972
1973/**
1974 * pci_enable_wake - change wakeup settings for a PCI device
1975 * @pci_dev: Target device
1976 * @state: PCI state from which device will issue wakeup events
1977 * @enable: Whether or not to enable event generation
1978 *
1979 * If @enable is set, check device_may_wakeup() for the device before calling
1980 * __pci_enable_wake() for it.
1981 */
1982int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
1983{
1984	if (enable && !device_may_wakeup(&pci_dev->dev))
1985		return -EINVAL;
1986
1987	return __pci_enable_wake(pci_dev, state, enable);
1988}
1989EXPORT_SYMBOL(pci_enable_wake);
1990
1991/**
1992 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1993 * @dev: PCI device to prepare
1994 * @enable: True to enable wake-up event generation; false to disable
1995 *
1996 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1997 * and this function allows them to set that up cleanly - pci_enable_wake()
1998 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1999 * ordering constraints.
2000 *
2001 * This function only returns error code if the device is not allowed to wake
2002 * up the system from sleep or it is not capable of generating PME# from both
2003 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2004 */
2005int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2006{
2007	return pci_pme_capable(dev, PCI_D3cold) ?
2008			pci_enable_wake(dev, PCI_D3cold, enable) :
2009			pci_enable_wake(dev, PCI_D3hot, enable);
2010}
2011EXPORT_SYMBOL(pci_wake_from_d3);
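
/*
 * Illustrative sketch, not part of the original file: a legacy ->suspend()
 * hook arming wake-up with pci_wake_from_d3().  foo_suspend() and "pdev"
 * are hypothetical names; the sequence of calls is the documented pattern.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
	return pci_set_power_state(pdev, PCI_D3hot);
}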
2012
2013/**
2014 * pci_target_state - find an appropriate low power state for a given PCI dev
2015 * @dev: PCI device
2016 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2017 *
2018 * Use underlying platform code to find a supported low power state for @dev.
2019 * If the platform can't manage @dev, return the deepest state from which it
2020 * can generate wake events, based on any available PME info.
2021 */
2022static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2023{
2024	pci_power_t target_state = PCI_D3hot;
2025
2026	if (platform_pci_power_manageable(dev)) {
2027		/*
2028		 * Call the platform to choose the target state of the device
2029		 * and enable wake-up from this state if supported.
2030		 */
2031		pci_power_t state = platform_pci_choose_state(dev);
2032
2033		switch (state) {
2034		case PCI_POWER_ERROR:
2035		case PCI_UNKNOWN:
2036			break;
2037		case PCI_D1:
2038		case PCI_D2:
2039			if (pci_no_d1d2(dev))
2040				break;
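			/* Fall through: D1/D2 are acceptable targets here. */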
2041		default:
2042			target_state = state;
2043		}
2044
2045		return target_state;
2046	}
2047
2048	if (!dev->pm_cap)
2049		target_state = PCI_D0;
2050
2051	/*
2052	 * If the device is in D3cold even though it's not power-manageable by
2053	 * the platform, it may have been powered down by non-standard means.
2054	 * Best to let it slumber.
2055	 */
2056	if (dev->current_state == PCI_D3cold)
2057		target_state = PCI_D3cold;
2058
2059	if (wakeup) {
2060		/*
2061		 * Find the deepest state from which the device can generate
2062		 * wake-up events, make it the target state and enable device
2063		 * to generate PME#.
2064		 */
2065		if (dev->pme_support) {
2066			while (target_state
2067			      && !(dev->pme_support & (1 << target_state)))
2068				target_state--;
2069		}
2070	}
2071
2072	return target_state;
2073}
2074
2075/**
2076 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
2077 * @dev: Device to handle.
2078 *
2079 * Choose the power state appropriate for the device depending on whether
2080 * it can wake up the system and/or is power manageable by the platform
2081 * (PCI_D3hot is the default) and put the device into that state.
2082 */
2083int pci_prepare_to_sleep(struct pci_dev *dev)
2084{
2085	bool wakeup = device_may_wakeup(&dev->dev);
2086	pci_power_t target_state = pci_target_state(dev, wakeup);
2087	int error;
2088
2089	if (target_state == PCI_POWER_ERROR)
2090		return -EIO;
2091
2092	pci_enable_wake(dev, target_state, wakeup);
2093
2094	error = pci_set_power_state(dev, target_state);
2095
2096	if (error)
2097		pci_enable_wake(dev, target_state, false);
2098
2099	return error;
2100}
2101EXPORT_SYMBOL(pci_prepare_to_sleep);
2102
2103/**
2104 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
2105 * @dev: Device to handle.
2106 *
2107 * Disable device's system wake-up capability and put it into D0.
2108 */
2109int pci_back_from_sleep(struct pci_dev *dev)
2110{
2111	pci_enable_wake(dev, PCI_D0, false);
2112	return pci_set_power_state(dev, PCI_D0);
2113}
2114EXPORT_SYMBOL(pci_back_from_sleep);
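
/*
 * Illustrative sketch, not part of the original file:
 * pci_prepare_to_sleep() and pci_back_from_sleep() form a matched pair
 * for code that defers the state choice to the PCI core.  The foo_*
 * wrappers and "pdev" are hypothetical names:
 *
 *	static int foo_suspend_late(struct pci_dev *pdev)
 *	{
 *		pci_save_state(pdev);
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int foo_resume_early(struct pci_dev *pdev)
 *	{
 *		pci_back_from_sleep(pdev);
 *		pci_restore_state(pdev);
 *		return 0;
 *	}
 */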
2115
2116/**
2117 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2118 * @dev: PCI device being suspended.
2119 *
2120 * Prepare @dev to generate wake-up events at run time and put it into a low
2121 * power state.
2122 */
2123int pci_finish_runtime_suspend(struct pci_dev *dev)
2124{
2125	pci_power_t target_state;
2126	int error;
2127
2128	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2129	if (target_state == PCI_POWER_ERROR)
2130		return -EIO;
2131
2132	dev->runtime_d3cold = target_state == PCI_D3cold;
2133
2134	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2135
2136	error = pci_set_power_state(dev, target_state);
2137
2138	if (error) {
2139		pci_enable_wake(dev, target_state, false);
2140		dev->runtime_d3cold = false;
2141	}
2142
2143	return error;
2144}
2145
2146/**
2147 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2148 * @dev: Device to check.
2149 *
2150 * Return true if the device itself is capable of generating wake-up events
2151 * (through the platform or using the native PCIe PME) or if the device supports
2152 * PME and one of its upstream bridges can generate wake-up events.
2153 */
2154bool pci_dev_run_wake(struct pci_dev *dev)
2155{
2156	struct pci_bus *bus = dev->bus;
2157
2158	if (!dev->pme_support)
2159		return false;
2160
2161	/* PME-capable in principle, but not from the target power state */
2162	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2163		return false;
2164
2165	if (device_can_wakeup(&dev->dev))
2166		return true;
2167
2168	while (bus->parent) {
2169		struct pci_dev *bridge = bus->self;
2170
2171		if (device_can_wakeup(&bridge->dev))
2172			return true;
2173
2174		bus = bus->parent;
2175	}
2176
2177	/* We have reached the root bus. */
2178	if (bus->bridge)
2179		return device_can_wakeup(bus->bridge);
2180
2181	return false;
2182}
2183EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2184
2185/**
2186 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2187 * @pci_dev: Device to check.
2188 *
2189 * Return 'true' if the device is runtime-suspended, does not have to be
2190 * reconfigured due to wakeup settings differences between system and
2191 * runtime suspend, and its current power state is suitable for the
2192 * upcoming (system) transition.
2193 *
2194 * If the device is not configured for system wakeup, disable PME for it before
2195 * returning 'true' to prevent it from waking up the system unnecessarily.
2196 */
2197bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2198{
2199	struct device *dev = &pci_dev->dev;
2200	bool wakeup = device_may_wakeup(dev);
2201
2202	if (!pm_runtime_suspended(dev)
2203	    || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
2204	    || platform_pci_need_resume(pci_dev))
2205		return false;
2206
2207	/*
2208	 * At this point the device is good to go unless it's been configured
2209	 * to generate PME at the runtime suspend time, but it is not supposed
2210	 * to wake up the system.  In that case, simply disable PME for it
2211	 * (it will have to be re-enabled on exit from system resume).
2212	 *
2213	 * If the device's power state is D3cold and the platform check above
2214	 * hasn't triggered, the device's configuration is suitable and we don't
2215	 * need to manipulate it at all.
2216	 */
2217	spin_lock_irq(&dev->power.lock);
2218
2219	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
2220	    !wakeup)
2221		__pci_pme_active(pci_dev, false);
2222
2223	spin_unlock_irq(&dev->power.lock);
2224	return true;
2225}
2226
2227/**
2228 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2229 * @pci_dev: Device to handle.
2230 *
2231 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2232 * it might have been disabled during the prepare phase of system suspend if
2233 * the device was not configured for system wakeup.
2234 */
2235void pci_dev_complete_resume(struct pci_dev *pci_dev)
2236{
2237	struct device *dev = &pci_dev->dev;
2238
2239	if (!pci_dev_run_wake(pci_dev))
2240		return;
2241
2242	spin_lock_irq(&dev->power.lock);
2243
2244	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2245		__pci_pme_active(pci_dev, true);
2246
2247	spin_unlock_irq(&dev->power.lock);
2248}
2249
2250void pci_config_pm_runtime_get(struct pci_dev *pdev)
2251{
2252	struct device *dev = &pdev->dev;
2253	struct device *parent = dev->parent;
2254
2255	if (parent)
2256		pm_runtime_get_sync(parent);
2257	pm_runtime_get_noresume(dev);
2258	/*
2259	 * pdev->current_state is set to PCI_D3cold during suspending,
2260	 * so wait until suspending completes
2261	 */
2262	pm_runtime_barrier(dev);
2263	/*
2264	 * Only need to resume devices in D3cold, because config
2265	 * registers are still accessible for devices suspended but
2266	 * not in D3cold.
2267	 */
2268	if (pdev->current_state == PCI_D3cold)
2269		pm_runtime_resume(dev);
2270}
2271
2272void pci_config_pm_runtime_put(struct pci_dev *pdev)
2273{
2274	struct device *dev = &pdev->dev;
2275	struct device *parent = dev->parent;
2276
2277	pm_runtime_put(dev);
2278	if (parent)
2279		pm_runtime_put_sync(parent);
2280}
2281
2282/**
2283 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2284 * @bridge: Bridge to check
2285 *
2286 * This function checks if it is possible to move the bridge to D3.
2287 * Currently we only allow D3 for recent enough PCIe ports.
2288 */
2289bool pci_bridge_d3_possible(struct pci_dev *bridge)
2290{
2291	if (!pci_is_pcie(bridge))
2292		return false;
2293
2294	switch (pci_pcie_type(bridge)) {
2295	case PCI_EXP_TYPE_ROOT_PORT:
2296	case PCI_EXP_TYPE_UPSTREAM:
2297	case PCI_EXP_TYPE_DOWNSTREAM:
2298		if (pci_bridge_d3_disable)
2299			return false;
2300
2301		/*
2302		 * Hotplug interrupts cannot be delivered if the link is down,
2303		 * so parents of a hotplug port must stay awake. In addition,
2304		 * hotplug ports handled by firmware in System Management Mode
2305		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2306		 * For simplicity, disallow in general for now.
2307		 */
2308		if (bridge->is_hotplug_bridge)
2309			return false;
2310
2311		if (pci_bridge_d3_force)
2312			return true;
2313
2314		/*
2315		 * It should be safe to put PCIe ports from 2015 or newer
2316		 * to D3.
2317		 */
2318		if (dmi_get_bios_year() >= 2015)
2319			return true;
2320		break;
2321	}
2322
2323	return false;
2324}
2325
2326static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2327{
2328	bool *d3cold_ok = data;
2329
2330	if (/* The device needs to be allowed to go to D3cold ... */
2331	    dev->no_d3cold || !dev->d3cold_allowed ||
2332
2333	    /* ... and if it is wakeup capable to do so from D3cold. */
2334	    (device_may_wakeup(&dev->dev) &&
2335	     !pci_pme_capable(dev, PCI_D3cold)) ||
2336
2337	    /* If it is a bridge it must be allowed to go to D3. */
2338	    !pci_power_manageable(dev))
2339
2340		*d3cold_ok = false;
2341
2342	return !*d3cold_ok;
2343}
2344
2345/**
2346 * pci_bridge_d3_update - Update bridge D3 capabilities
2347 * @dev: PCI device which is changed
2348 *
2349 * Update upstream bridge PM capabilities depending on whether the
2350 * device PM configuration was changed or the device is being removed.  The
2351 * change is also propagated upstream.
2352 */
2353void pci_bridge_d3_update(struct pci_dev *dev)
2354{
2355	bool remove = !device_is_registered(&dev->dev);
2356	struct pci_dev *bridge;
2357	bool d3cold_ok = true;
2358
2359	bridge = pci_upstream_bridge(dev);
2360	if (!bridge || !pci_bridge_d3_possible(bridge))
2361		return;
2362
2363	/*
2364	 * If D3 is currently allowed for the bridge, removing one of its
2365	 * children won't change that.
2366	 */
2367	if (remove && bridge->bridge_d3)
2368		return;
2369
2370	/*
2371	 * If D3 is currently allowed for the bridge and a child is added or
2372	 * changed, disallowance of D3 can only be caused by that child, so
2373	 * we only need to check that single device, not any of its siblings.
2374	 *
2375	 * If D3 is currently not allowed for the bridge, checking the device
2376	 * first may allow us to skip checking its siblings.
2377	 */
2378	if (!remove)
2379		pci_dev_check_d3cold(dev, &d3cold_ok);
2380
2381	/*
2382	 * If D3 is currently not allowed for the bridge, this may be caused
2383	 * either by the device being changed/removed or any of its siblings,
2384	 * so we need to go through all children to find out if one of them
2385	 * continues to block D3.
2386	 */
2387	if (d3cold_ok && !bridge->bridge_d3)
2388		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2389			     &d3cold_ok);
2390
2391	if (bridge->bridge_d3 != d3cold_ok) {
2392		bridge->bridge_d3 = d3cold_ok;
2393		/* Propagate change to upstream bridges */
2394		pci_bridge_d3_update(bridge);
2395	}
2396}
2397
2398/**
2399 * pci_d3cold_enable - Enable D3cold for device
2400 * @dev: PCI device to handle
2401 *
2402 * This function can be used in drivers to enable D3cold from the device
2403 * they handle.  It also updates upstream PCI bridge PM capabilities
2404 * accordingly.
2405 */
2406void pci_d3cold_enable(struct pci_dev *dev)
2407{
2408	if (dev->no_d3cold) {
2409		dev->no_d3cold = false;
2410		pci_bridge_d3_update(dev);
2411	}
2412}
2413EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2414
2415/**
2416 * pci_d3cold_disable - Disable D3cold for device
2417 * @dev: PCI device to handle
2418 *
2419 * This function can be used in drivers to disable D3cold from the device
2420 * they handle.  It also updates upstream PCI bridge PM capabilities
2421 * accordingly.
2422 */
2423void pci_d3cold_disable(struct pci_dev *dev)
2424{
2425	if (!dev->no_d3cold) {
2426		dev->no_d3cold = true;
2427		pci_bridge_d3_update(dev);
2428	}
2429}
2430EXPORT_SYMBOL_GPL(pci_d3cold_disable);
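
/*
 * Illustrative note, not part of the original file: a driver that has
 * found D3cold unreliable on its hardware can veto it from ->probe()
 * (hypothetical "pdev"):
 *
 *	pci_d3cold_disable(pdev);
 *
 * which also re-evaluates the upstream bridges via pci_bridge_d3_update().
 */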
2431
2432/**
2433 * pci_pm_init - Initialize PM functions of given PCI device
2434 * @dev: PCI device to handle.
2435 */
2436void pci_pm_init(struct pci_dev *dev)
2437{
2438	int pm;
2439	u16 pmc;
2440
2441	pm_runtime_forbid(&dev->dev);
2442	pm_runtime_set_active(&dev->dev);
2443	pm_runtime_enable(&dev->dev);
2444	device_enable_async_suspend(&dev->dev);
2445	dev->wakeup_prepared = false;
2446
2447	dev->pm_cap = 0;
2448	dev->pme_support = 0;
2449
2450	/* find PCI PM capability in list */
2451	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2452	if (!pm)
2453		return;
2454	/* Check device's ability to generate PME# */
2455	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2456
2457	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2458		pci_err(dev, "unsupported PM cap regs version (%u)\n",
2459			pmc & PCI_PM_CAP_VER_MASK);
2460		return;
2461	}
2462
2463	dev->pm_cap = pm;
2464	dev->d3_delay = PCI_PM_D3_WAIT;
2465	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2466	dev->bridge_d3 = pci_bridge_d3_possible(dev);
2467	dev->d3cold_allowed = true;
2468
2469	dev->d1_support = false;
2470	dev->d2_support = false;
2471	if (!pci_no_d1d2(dev)) {
2472		if (pmc & PCI_PM_CAP_D1)
2473			dev->d1_support = true;
2474		if (pmc & PCI_PM_CAP_D2)
2475			dev->d2_support = true;
2476
2477		if (dev->d1_support || dev->d2_support)
2478			pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
2479				   dev->d1_support ? " D1" : "",
2480				   dev->d2_support ? " D2" : "");
2481	}
2482
2483	pmc &= PCI_PM_CAP_PME_MASK;
2484	if (pmc) {
2485		pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
2486			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2487			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2488			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2489			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2490			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2491		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2492		dev->pme_poll = true;
2493		/*
2494		 * Make device's PM flags reflect the wake-up capability, but
2495		 * let user space enable it to wake up the system as needed.
2496		 */
2497		device_set_wakeup_capable(&dev->dev, true);
2498		/* Disable the PME# generation functionality */
2499		pci_pme_active(dev, false);
2500	}
2501}
2502
2503static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2504{
2505	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2506
2507	switch (prop) {
2508	case PCI_EA_P_MEM:
2509	case PCI_EA_P_VF_MEM:
2510		flags |= IORESOURCE_MEM;
2511		break;
2512	case PCI_EA_P_MEM_PREFETCH:
2513	case PCI_EA_P_VF_MEM_PREFETCH:
2514		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2515		break;
2516	case PCI_EA_P_IO:
2517		flags |= IORESOURCE_IO;
2518		break;
2519	default:
2520		return 0;
2521	}
2522
2523	return flags;
2524}
2525
2526static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2527					    u8 prop)
2528{
2529	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2530		return &dev->resource[bei];
2531#ifdef CONFIG_PCI_IOV
2532	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2533		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2534		return &dev->resource[PCI_IOV_RESOURCES +
2535				      bei - PCI_EA_BEI_VF_BAR0];
2536#endif
2537	else if (bei == PCI_EA_BEI_ROM)
2538		return &dev->resource[PCI_ROM_RESOURCE];
2539	else
2540		return NULL;
2541}
2542
2543/* Read an Enhanced Allocation (EA) entry */
2544static int pci_ea_read(struct pci_dev *dev, int offset)
2545{
2546	struct resource *res;
2547	int ent_size, ent_offset = offset;
2548	resource_size_t start, end;
2549	unsigned long flags;
2550	u32 dw0, bei, base, max_offset;
2551	u8 prop;
2552	bool support_64 = (sizeof(resource_size_t) >= 8);
2553
2554	pci_read_config_dword(dev, ent_offset, &dw0);
2555	ent_offset += 4;
2556
2557	/* Entry size field indicates DWORDs after 1st */
2558	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2559
2560	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2561		goto out;
2562
2563	bei = (dw0 & PCI_EA_BEI) >> 4;
2564	prop = (dw0 & PCI_EA_PP) >> 8;
2565
2566	/*
2567	 * If the Property is in the reserved range, try the Secondary
2568	 * Property instead.
2569	 */
2570	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2571		prop = (dw0 & PCI_EA_SP) >> 16;
2572	if (prop > PCI_EA_P_BRIDGE_IO)
2573		goto out;
2574
2575	res = pci_ea_get_resource(dev, bei, prop);
2576	if (!res) {
2577		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2578		goto out;
2579	}
2580
2581	flags = pci_ea_flags(dev, prop);
2582	if (!flags) {
2583		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2584		goto out;
2585	}
2586
2587	/* Read Base */
2588	pci_read_config_dword(dev, ent_offset, &base);
2589	start = (base & PCI_EA_FIELD_MASK);
2590	ent_offset += 4;
2591
2592	/* Read MaxOffset */
2593	pci_read_config_dword(dev, ent_offset, &max_offset);
2594	ent_offset += 4;
2595
2596	/* Read Base MSBs (if 64-bit entry) */
2597	if (base & PCI_EA_IS_64) {
2598		u32 base_upper;
2599
2600		pci_read_config_dword(dev, ent_offset, &base_upper);
2601		ent_offset += 4;
2602
2603		flags |= IORESOURCE_MEM_64;
2604
2605		/* entry starts above 32-bit boundary, can't use */
2606		if (!support_64 && base_upper)
2607			goto out;
2608
2609		if (support_64)
2610			start |= ((u64)base_upper << 32);
2611	}
2612
2613	end = start + (max_offset | 0x03);
2614
2615	/* Read MaxOffset MSBs (if 64-bit entry) */
2616	if (max_offset & PCI_EA_IS_64) {
2617		u32 max_offset_upper;
2618
2619		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2620		ent_offset += 4;
2621
2622		flags |= IORESOURCE_MEM_64;
2623
2624		/* entry too big, can't use */
2625		if (!support_64 && max_offset_upper)
2626			goto out;
2627
2628		if (support_64)
2629			end += ((u64)max_offset_upper << 32);
2630	}
2631
2632	if (end < start) {
2633		pci_err(dev, "EA Entry crosses address boundary\n");
2634		goto out;
2635	}
2636
2637	if (ent_size != ent_offset - offset) {
2638		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
2639			ent_size, ent_offset - offset);
2640		goto out;
2641	}
2642
2643	res->name = pci_name(dev);
2644	res->start = start;
2645	res->end = end;
2646	res->flags = flags;
2647
2648	if (bei <= PCI_EA_BEI_BAR5)
2649		pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2650			   bei, res, prop);
2651	else if (bei == PCI_EA_BEI_ROM)
2652		pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
2653			   res, prop);
2654	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
2655		pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
2656			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
2657	else
2658		pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
2659			   bei, res, prop);
2660
2661out:
2662	return offset + ent_size;
2663}
2664
2665/* Enhanced Allocation Initialization */
2666void pci_ea_init(struct pci_dev *dev)
2667{
2668	int ea;
2669	u8 num_ent;
2670	int offset;
2671	int i;
2672
2673	/* find PCI EA capability in list */
2674	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2675	if (!ea)
2676		return;
2677
2678	/* determine the number of entries */
2679	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2680					&num_ent);
2681	num_ent &= PCI_EA_NUM_ENT_MASK;
2682
2683	offset = ea + PCI_EA_FIRST_ENT;
2684
2685	/* Skip DWORD 2 for type 1 functions */
2686	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2687		offset += 4;
2688
2689	/* parse each EA entry */
2690	for (i = 0; i < num_ent; ++i)
2691		offset = pci_ea_read(dev, offset);
2692}
2693
2694static void pci_add_saved_cap(struct pci_dev *pci_dev,
2695	struct pci_cap_saved_state *new_cap)
2696{
2697	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2698}
2699
2700/**
2701 * _pci_add_cap_save_buffer - allocate buffer for saving given
2702 *                            capability registers
2703 * @dev: the PCI device
2704 * @cap: the capability to allocate the buffer for
2705 * @extended: Standard or Extended capability ID
2706 * @size: requested size of the buffer
2707 */
2708static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2709				    bool extended, unsigned int size)
2710{
2711	int pos;
2712	struct pci_cap_saved_state *save_state;
2713
2714	if (extended)
2715		pos = pci_find_ext_capability(dev, cap);
2716	else
2717		pos = pci_find_capability(dev, cap);
2718
2719	if (!pos)
2720		return 0;
2721
2722	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2723	if (!save_state)
2724		return -ENOMEM;
2725
2726	save_state->cap.cap_nr = cap;
2727	save_state->cap.cap_extended = extended;
2728	save_state->cap.size = size;
2729	pci_add_saved_cap(dev, save_state);
2730
2731	return 0;
2732}
2733
2734int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2735{
2736	return _pci_add_cap_save_buffer(dev, cap, false, size);
2737}
2738
2739int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2740{
2741	return _pci_add_cap_save_buffer(dev, cap, true, size);
2742}
2743
2744/**
2745 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2746 * @dev: the PCI device
2747 */
2748void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2749{
2750	int error;
2751
2752	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2753					PCI_EXP_SAVE_REGS * sizeof(u16));
2754	if (error)
2755		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
2756
2757	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2758	if (error)
2759		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
2760
2761	pci_allocate_vc_save_buffers(dev);
2762}
2763
2764void pci_free_cap_save_buffers(struct pci_dev *dev)
2765{
2766	struct pci_cap_saved_state *tmp;
2767	struct hlist_node *n;
2768
2769	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
2770		kfree(tmp);
2771}
2772
2773/**
2774 * pci_configure_ari - enable or disable ARI forwarding
2775 * @dev: the PCI device
2776 *
2777 * If @dev and its upstream bridge both support ARI, enable ARI in the
2778 * bridge.  Otherwise, disable ARI in the bridge.
2779 */
2780void pci_configure_ari(struct pci_dev *dev)
2781{
2782	u32 cap;
2783	struct pci_dev *bridge;
2784
2785	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
2786		return;
2787
2788	bridge = dev->bus->self;
2789	if (!bridge)
2790		return;
2791
2792	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
2793	if (!(cap & PCI_EXP_DEVCAP2_ARI))
2794		return;
2795
2796	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2797		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2798					 PCI_EXP_DEVCTL2_ARI);
2799		bridge->ari_enabled = 1;
2800	} else {
2801		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2802					   PCI_EXP_DEVCTL2_ARI);
2803		bridge->ari_enabled = 0;
2804	}
2805}
2806
2807static int pci_acs_enable;
2808
2809/**
2810 * pci_request_acs - ask for ACS to be enabled if supported
2811 */
2812void pci_request_acs(void)
2813{
2814	pci_acs_enable = 1;
2815}
2816
2817/**
2818 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
2819 * @dev: the PCI device
2820 */
2821static void pci_std_enable_acs(struct pci_dev *dev)
2822{
2823	int pos;
2824	u16 cap;
2825	u16 ctrl;
2826
2827	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2828	if (!pos)
2829		return;
2830
2831	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2832	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2833
2834	/* Source Validation */
2835	ctrl |= (cap & PCI_ACS_SV);
2836
2837	/* P2P Request Redirect */
2838	ctrl |= (cap & PCI_ACS_RR);
2839
2840	/* P2P Completion Redirect */
2841	ctrl |= (cap & PCI_ACS_CR);
2842
2843	/* Upstream Forwarding */
2844	ctrl |= (cap & PCI_ACS_UF);
2845
2846	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2847}
2848
2849/**
2850 * pci_enable_acs - enable ACS if the hardware supports it
2851 * @dev: the PCI device
2852 */
2853void pci_enable_acs(struct pci_dev *dev)
2854{
2855	if (!pci_acs_enable)
2856		return;
2857
2858	if (!pci_dev_specific_enable_acs(dev))
2859		return;
2860
2861	pci_std_enable_acs(dev);
2862}
2863
2864static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2865{
2866	int pos;
2867	u16 cap, ctrl;
2868
2869	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2870	if (!pos)
2871		return false;
2872
2873	/*
2874	 * Except for egress control, capabilities are either required
2875	 * or only required if controllable.  Features missing from the
2876	 * capability field can therefore be assumed as hard-wired enabled.
2877	 */
2878	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2879	acs_flags &= (cap | PCI_ACS_EC);
2880
2881	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2882	return (ctrl & acs_flags) == acs_flags;
2883}
2884
2885/**
2886 * pci_acs_enabled - test ACS against required flags for a given device
2887 * @pdev: device to test
2888 * @acs_flags: required PCI ACS flags
2889 *
2890 * Return true if the device supports the provided flags.  Automatically
2891 * filters out flags that are not implemented on multifunction devices.
2892 *
2893 * Note that this interface checks the effective ACS capabilities of the
2894 * device rather than the actual capabilities.  For instance, most single
2895 * function endpoints are not required to support ACS because they have no
2896 * opportunity for peer-to-peer access.  We therefore return 'true'
2897 * regardless of whether the device exposes an ACS capability.  This makes
2898 * it much easier for callers of this function to ignore the actual type
2899 * or topology of the device when testing ACS support.
2900 */
2901bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2902{
2903	int ret;
2904
2905	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2906	if (ret >= 0)
2907		return ret > 0;
2908
2909	/*
2910	 * Conventional PCI and PCI-X devices never support ACS, either
2911	 * effectively or actually.  The shared bus topology implies that
2912	 * any device on the bus can receive or snoop DMA.
2913	 */
2914	if (!pci_is_pcie(pdev))
2915		return false;
2916
2917	switch (pci_pcie_type(pdev)) {
2918	/*
2919	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
2920	 * but since their primary interface is PCI/X, we conservatively
2921	 * handle them as we would a non-PCIe device.
2922	 */
2923	case PCI_EXP_TYPE_PCIE_BRIDGE:
2924	/*
2925	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
2926	 * applicable... must never implement an ACS Extended Capability...".
2927	 * This seems arbitrary, but we take a conservative interpretation
2928	 * of this statement.
2929	 */
2930	case PCI_EXP_TYPE_PCI_BRIDGE:
2931	case PCI_EXP_TYPE_RC_EC:
2932		return false;
2933	/*
2934	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2935	 * implement ACS in order to indicate their peer-to-peer capabilities,
2936	 * regardless of whether they are single- or multi-function devices.
2937	 */
2938	case PCI_EXP_TYPE_DOWNSTREAM:
2939	case PCI_EXP_TYPE_ROOT_PORT:
2940		return pci_acs_flags_enabled(pdev, acs_flags);
2941	/*
2942	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2943	 * implemented by the remaining PCIe types to indicate peer-to-peer
2944	 * capabilities, but only when they are part of a multifunction
2945	 * device.  The footnote for section 6.12 indicates the specific
2946	 * PCIe types included here.
2947	 */
2948	case PCI_EXP_TYPE_ENDPOINT:
2949	case PCI_EXP_TYPE_UPSTREAM:
2950	case PCI_EXP_TYPE_LEG_END:
2951	case PCI_EXP_TYPE_RC_END:
2952		if (!pdev->multifunction)
2953			break;
2954
2955		return pci_acs_flags_enabled(pdev, acs_flags);
2956	}
2957
2958	/*
2959	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
2960	 * to single function devices with the exception of downstream ports.
2961	 */
2962	return true;
2963}
2964
2965/**
2966 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2967 * @start: starting downstream device
2968 * @end: ending upstream device or NULL to search to the root bus
2969 * @acs_flags: required flags
2970 *
2971 * Walk up a device tree from start to end testing PCI ACS support.  If
2972 * any step along the way does not support the required flags, return false.
2973 */
2974bool pci_acs_path_enabled(struct pci_dev *start,
2975			  struct pci_dev *end, u16 acs_flags)
2976{
2977	struct pci_dev *pdev, *parent = start;
2978
2979	do {
2980		pdev = parent;
2981
2982		if (!pci_acs_enabled(pdev, acs_flags))
2983			return false;
2984
2985		if (pci_is_root_bus(pdev->bus))
2986			return (end == NULL);
2987
2988		parent = pdev->bus->self;
2989	} while (pdev != end);
2990
2991	return true;
2992}
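
/*
 * Illustrative note, not part of the original file: isolation-aware code
 * (an IOMMU grouping check, for instance) typically asks whether the whole
 * path to the root redirects peer-to-peer traffic (hypothetical "pdev"):
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *
 *	if (pci_acs_path_enabled(pdev, NULL, flags))
 *		...DMA from pdev cannot be redirected to a peer...
 */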
2993
2994/**
2995 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
2996 * @pdev: PCI device
2997 * @bar: BAR to find
2998 *
2999 * Helper to find the position of the ctrl register for a BAR.
3000 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3001 * Returns -ENOENT if no ctrl register for the BAR could be found.
3002 */
3003static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3004{
3005	unsigned int pos, nbars, i;
3006	u32 ctrl;
3007
3008	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3009	if (!pos)
3010		return -ENOTSUPP;
3011
3012	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3013	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3014		    PCI_REBAR_CTRL_NBAR_SHIFT;
3015
3016	for (i = 0; i < nbars; i++, pos += 8) {
3017		int bar_idx;
3018
3019		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3020		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3021		if (bar_idx == bar)
3022			return pos;
3023	}
3024
3025	return -ENOENT;
3026}
3027
3028/**
3029 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3030 * @pdev: PCI device
3031 * @bar: BAR to query
3032 *
3033 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3034 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3035 */
3036u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3037{
3038	int pos;
3039	u32 cap;
3040
3041	pos = pci_rebar_find_pos(pdev, bar);
3042	if (pos < 0)
3043		return 0;
3044
3045	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3046	return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3047}
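
/*
 * Illustrative example, not part of the original file: with bit 0 = 1MB,
 * a returned mask of 0x700 (bits 8, 9 and 10 set) advertises 256MB, 512MB
 * and 1GB BAR sizes, i.e. size_bytes = 1MB << bit.
 */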
3048
3049/**
3050 * pci_rebar_get_current_size - get the current size of a BAR
3051 * @pdev: PCI device
3052 * @bar: BAR to query
3053 *
3054 * Read the size of a BAR from the resizable BAR config.
3055 * Returns size if found or negative error code.
3056 */
3057int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3058{
3059	int pos;
3060	u32 ctrl;
3061
3062	pos = pci_rebar_find_pos(pdev, bar);
3063	if (pos < 0)
3064		return pos;
3065
3066	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3067	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
3068}
3069
3070/**
3071 * pci_rebar_set_size - set a new size for a BAR
3072 * @pdev: PCI device
3073 * @bar: BAR to set size to
3074 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3075 *
3076 * Set the new size of a BAR as defined in the spec.
3077 * Returns zero if resizing was successful, error code otherwise.
3078 */
3079int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3080{
3081	int pos;
3082	u32 ctrl;
3083
3084	pos = pci_rebar_find_pos(pdev, bar);
3085	if (pos < 0)
3086		return pos;
3087
3088	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3089	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3090	ctrl |= size << 8;
3091	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3092	return 0;
3093}
3094
3095/**
3096 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3097 * @dev: the PCI device
3098 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3099 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3100 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3101 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3102 *
3103 * Return 0 if all upstream bridges support AtomicOp routing, egress
3104 * blocking is disabled on all upstream ports, and the root port supports
3105 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3106 * AtomicOp completion), or a negative errno otherwise.
3107 */
3108int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3109{
3110	struct pci_bus *bus = dev->bus;
3111	struct pci_dev *bridge;
3112	u32 cap, ctl2;
3113
3114	if (!pci_is_pcie(dev))
3115		return -EINVAL;
3116
3117	/*
3118	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3119	 * AtomicOp requesters.  For now, we only support endpoints as
3120	 * requesters and root ports as completers.  No endpoints as
3121	 * completers, and no peer-to-peer.
3122	 */
3123
3124	switch (pci_pcie_type(dev)) {
3125	case PCI_EXP_TYPE_ENDPOINT:
3126	case PCI_EXP_TYPE_LEG_END:
3127	case PCI_EXP_TYPE_RC_END:
3128		break;
3129	default:
3130		return -EINVAL;
3131	}
3132
3133	while (bus->parent) {
3134		bridge = bus->self;
3135
3136		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3137
3138		switch (pci_pcie_type(bridge)) {
3139		/* Ensure switch ports support AtomicOp routing */
3140		case PCI_EXP_TYPE_UPSTREAM:
3141		case PCI_EXP_TYPE_DOWNSTREAM:
3142			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3143				return -EINVAL;
3144			break;
3145
3146		/* Ensure root port supports all the sizes we care about */
3147		case PCI_EXP_TYPE_ROOT_PORT:
3148			if ((cap & cap_mask) != cap_mask)
3149				return -EINVAL;
3150			break;
3151		}
3152
3153		/* Ensure upstream ports don't block AtomicOps on egress */
3154		if (!bridge->has_secondary_link) {
3155			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3156						   &ctl2);
3157			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3158				return -EINVAL;
3159		}
3160
3161		bus = bus->parent;
3162	}
3163
3164	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3165				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3166	return 0;
3167}
3168EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
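
/*
 * Illustrative sketch, not part of the original file: an endpoint driver
 * that issues 64-bit AtomicOps to host memory would request routing and
 * completion support before enabling the feature (hypothetical "pdev"):
 *
 *	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "AtomicOps to root port unavailable\n");
 */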
3169
3170/**
3171 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3172 * @dev: the PCI device
3173 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3174 *
3175 * Perform INTx swizzling for a device behind one level of bridge.  This is
3176 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3177 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3178 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3179 * the PCI Express Base Specification, Revision 2.1)
3180 */
3181u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3182{
3183	int slot;
3184
3185	if (pci_ari_enabled(dev->bus))
3186		slot = 0;
3187	else
3188		slot = PCI_SLOT(dev->devfn);
3189
3190	return (((pin - 1) + slot) % 4) + 1;
3191}
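
/*
 * Worked example (editorial): a device in slot 2 asserting INTB (pin 2)
 * appears upstream of its bridge as (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD.
 */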
3192
3193int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3194{
3195	u8 pin;
3196
3197	pin = dev->pin;
3198	if (!pin)
3199		return -1;
3200
3201	while (!pci_is_root_bus(dev->bus)) {
3202		pin = pci_swizzle_interrupt_pin(dev, pin);
3203		dev = dev->bus->self;
3204	}
3205	*bridge = dev;
3206	return pin;
3207}
3208
3209/**
3210 * pci_common_swizzle - swizzle INTx all the way to root bridge
3211 * @dev: the PCI device
3212 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3213 *
3214 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3215 * bridges all the way up to a PCI root bus.
3216 */
3217u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3218{
3219	u8 pin = *pinp;
3220
3221	while (!pci_is_root_bus(dev->bus)) {
3222		pin = pci_swizzle_interrupt_pin(dev, pin);
3223		dev = dev->bus->self;
3224	}
3225	*pinp = pin;
3226	return PCI_SLOT(dev->devfn);
3227}
3228EXPORT_SYMBOL_GPL(pci_common_swizzle);
3229
3230/**
3231 *	pci_release_region - Release a PCI bar
3232 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
3233 *	@bar: BAR to release
3234 *
3235 *	Releases the PCI I/O and memory resources previously reserved by a
3236 *	successful call to pci_request_region.  Call this function only
3237 *	after all use of the PCI regions has ceased.
3238 */
3239void pci_release_region(struct pci_dev *pdev, int bar)
3240{
3241	struct pci_devres *dr;
3242
3243	if (pci_resource_len(pdev, bar) == 0)
3244		return;
3245	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3246		release_region(pci_resource_start(pdev, bar),
3247				pci_resource_len(pdev, bar));
3248	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3249		release_mem_region(pci_resource_start(pdev, bar),
3250				pci_resource_len(pdev, bar));
3251
3252	dr = find_pci_dr(pdev);
3253	if (dr)
3254		dr->region_mask &= ~(1 << bar);
3255}
3256EXPORT_SYMBOL(pci_release_region);
3257
3258/**
3259 *	__pci_request_region - Reserve PCI I/O and memory resource
3260 *	@pdev: PCI device whose resources are to be reserved
3261 *	@bar: BAR to be reserved
3262 *	@res_name: Name to be associated with resource.
3263 *	@exclusive: whether the region access is exclusive or not
3264 *
3265 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
3266 *	being reserved by owner @res_name.  Do not access any
3267 *	address inside the PCI regions unless this call returns
3268 *	successfully.
3269 *
3270 *	If @exclusive is set, then the region is marked so that userspace
3271 *	is explicitly not allowed to map the resource via /dev/mem or
3272 *	sysfs MMIO access.
3273 *
3274 *	Returns 0 on success, or %EBUSY on error.  A warning
3275 *	message is also printed on failure.
3276 */
3277static int __pci_request_region(struct pci_dev *pdev, int bar,
3278				const char *res_name, int exclusive)
3279{
3280	struct pci_devres *dr;
3281
3282	if (pci_resource_len(pdev, bar) == 0)
3283		return 0;
3284
3285	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3286		if (!request_region(pci_resource_start(pdev, bar),
3287			    pci_resource_len(pdev, bar), res_name))
3288			goto err_out;
3289	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3290		if (!__request_mem_region(pci_resource_start(pdev, bar),
3291					pci_resource_len(pdev, bar), res_name,
3292					exclusive))
3293			goto err_out;
3294	}
3295
3296	dr = find_pci_dr(pdev);
3297	if (dr)
3298		dr->region_mask |= 1 << bar;
3299
3300	return 0;
3301
3302err_out:
3303	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3304		 &pdev->resource[bar]);
3305	return -EBUSY;
3306}
3307
3308/**
3309 *	pci_request_region - Reserve PCI I/O and memory resource
3310 *	@pdev: PCI device whose resources are to be reserved
3311 *	@bar: BAR to be reserved
3312 *	@res_name: Name to be associated with resource
3313 *
3314 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
3315 *	being reserved by owner @res_name.  Do not access any
3316 *	address inside the PCI regions unless this call returns
3317 *	successfully.
3318 *
3319 *	Returns 0 on success, or %EBUSY on error.  A warning
3320 *	message is also printed on failure.
3321 */
3322int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3323{
3324	return __pci_request_region(pdev, bar, res_name, 0);
3325}
3326EXPORT_SYMBOL(pci_request_region);
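
/*
 * Illustrative sketch, not part of the original file: requesting and
 * mapping a single BAR in ->probe() (hypothetical names, standard calls):
 *
 *	err = pci_request_region(pdev, 0, "foo");
 *	if (err)
 *		return err;
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs) {
 *		pci_release_region(pdev, 0);
 *		return -ENOMEM;
 *	}
 */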
3327
3328/**
3329 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
3330 *	@pdev: PCI device whose resources are to be reserved
3331 *	@bar: BAR to be reserved
3332 *	@res_name: Name to be associated with resource.
3333 *
3334 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
3335 *	being reserved by owner @res_name.  Do not access any
3336 *	address inside the PCI regions unless this call returns
3337 *	successfully.
3338 *
3339 *	Returns 0 on success, or %EBUSY on error.  A warning
3340 *	message is also printed on failure.
3341 *
3342 *	The key difference _exclusive makes is that userspace is
3343 *	explicitly not allowed to map the resource via /dev/mem or
3344 *	sysfs.
3345 */
3346int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3347				 const char *res_name)
3348{
3349	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3350}
3351EXPORT_SYMBOL(pci_request_region_exclusive);
3352
3353/**
3354 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3355 * @pdev: PCI device whose resources were previously reserved
3356 * @bars: Bitmask of BARs to be released
3357 *
3358 * Release selected PCI I/O and memory resources previously reserved.
3359 * Call this function only after all use of the PCI regions has ceased.
3360 */
3361void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3362{
3363	int i;
3364
3365	for (i = 0; i < 6; i++)
3366		if (bars & (1 << i))
3367			pci_release_region(pdev, i);
3368}
3369EXPORT_SYMBOL(pci_release_selected_regions);
3370
3371static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3372					  const char *res_name, int excl)
3373{
3374	int i;
3375
3376	for (i = 0; i < 6; i++)
3377		if (bars & (1 << i))
3378			if (__pci_request_region(pdev, i, res_name, excl))
3379				goto err_out;
3380	return 0;
3381
3382err_out:
3383	while (--i >= 0)
3384		if (bars & (1 << i))
3385			pci_release_region(pdev, i);
3386
3387	return -EBUSY;
3388}
3389
3390
3391/**
3392 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3393 * @pdev: PCI device whose resources are to be reserved
3394 * @bars: Bitmask of BARs to be requested
3395 * @res_name: Name to be associated with resource
3396 */
3397int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3398				 const char *res_name)
3399{
3400	return __pci_request_selected_regions(pdev, bars, res_name, 0);
3401}
3402EXPORT_SYMBOL(pci_request_selected_regions);
3403
3404int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3405					   const char *res_name)
3406{
3407	return __pci_request_selected_regions(pdev, bars, res_name,
3408			IORESOURCE_EXCLUSIVE);
3409}
3410EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3411
3412/**
3413 *	pci_release_regions - Release reserved PCI I/O and memory resources
3414 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
3415 *
3416 *	Releases all PCI I/O and memory resources previously reserved by a
3417 *	successful call to pci_request_regions.  Call this function only
3418 *	after all use of the PCI regions has ceased.
3419 */
3420
3421void pci_release_regions(struct pci_dev *pdev)
3422{
3423	pci_release_selected_regions(pdev, (1 << 6) - 1);
3424}
3425EXPORT_SYMBOL(pci_release_regions);
3426
3427/**
3428 *	pci_request_regions - Reserve PCI I/O and memory resources
3429 *	@pdev: PCI device whose resources are to be reserved
3430 *	@res_name: Name to be associated with resource.
3431 *
3432 *	Mark all PCI regions associated with PCI device @pdev as
3433 *	being reserved by owner @res_name.  Do not access any
3434 *	address inside the PCI regions unless this call returns
3435 *	successfully.
3436 *
3437 *	Returns 0 on success, or %EBUSY on error.  A warning
3438 *	message is also printed on failure.
3439 */
3440int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3441{
3442	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3443}
3444EXPORT_SYMBOL(pci_request_regions);
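
/*
 * Illustrative sketch, not part of the original file: the common ->probe()
 * pattern around pci_request_regions().  foo_probe() and the "foo" name
 * are hypothetical; the calls are the standard sequence.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = pci_request_regions(pdev, "foo");
	if (err)
		goto err_disable;

	pci_set_master(pdev);
	return 0;

err_disable:
	pci_disable_device(pdev);
	return err;
}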
3445
3446/**
3447 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3448 *	@pdev: PCI device whose resources are to be reserved
3449 *	@res_name: Name to be associated with resource.
3450 *
3451 *	Mark all PCI regions associated with PCI device @pdev as
3452 *	being reserved by owner @res_name.  Do not access any
3453 *	address inside the PCI regions unless this call returns
3454 *	successfully.
3455 *
3456 *	pci_request_regions_exclusive() will mark the region so that
3457 *	/dev/mem and the sysfs MMIO access will not be allowed.
3458 *
3459 *	Returns 0 on success, or %EBUSY on error.  A warning
3460 *	message is also printed on failure.
3461 */
3462int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3463{
3464	return pci_request_selected_regions_exclusive(pdev,
3465					((1 << 6) - 1), res_name);
3466}
3467EXPORT_SYMBOL(pci_request_regions_exclusive);
3468
3469/*
3470 * Record the PCI IO range (expressed as CPU physical address + size).
3471 * Return a negative value if an error has occurred, zero otherwise.
3472 */
3473int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3474			resource_size_t	size)
3475{
3476	int ret = 0;
3477#ifdef PCI_IOBASE
3478	struct logic_pio_hwaddr *range;
3479
3480	if (!size || addr + size < addr)
3481		return -EINVAL;
3482
3483	range = kzalloc(sizeof(*range), GFP_ATOMIC);
3484	if (!range)
3485		return -ENOMEM;
3486
3487	range->fwnode = fwnode;
3488	range->size = size;
3489	range->hw_start = addr;
3490	range->flags = LOGIC_PIO_CPU_MMIO;
3491
3492	ret = logic_pio_register_range(range);
3493	if (ret)
3494		kfree(range);
3495#endif
3496
3497	return ret;
3498}
3499
3500phys_addr_t pci_pio_to_address(unsigned long pio)
3501{
3502	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3503
3504#ifdef PCI_IOBASE
3505	if (pio >= MMIO_UPPER_LIMIT)
3506		return address;
3507
3508	address = logic_pio_to_hwaddr(pio);
3509#endif
3510
3511	return address;
3512}
3513
3514unsigned long __weak pci_address_to_pio(phys_addr_t address)
3515{
3516#ifdef PCI_IOBASE
3517	return logic_pio_trans_cpuaddr(address);
3518#else
3519	if (address > IO_SPACE_LIMIT)
3520		return (unsigned long)-1;
3521
3522	return (unsigned long) address;
3523#endif
3524}
3525
3526/**
3527 *	pci_remap_iospace - Remap the memory mapped I/O space
3528 *	@res: Resource describing the I/O space
3529 *	@phys_addr: physical address of range to be mapped
3530 *
3531 *	Remap the memory mapped I/O space described by the @res
3532 *	and the CPU physical address @phys_addr into virtual address space.
3533 *	Only architectures that have memory mapped IO functions defined
3534 *	(and the PCI_IOBASE value defined) should call this function.
3535 */
3536int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3537{
3538#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3539	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3540
3541	if (!(res->flags & IORESOURCE_IO))
3542		return -EINVAL;
3543
3544	if (res->end > IO_SPACE_LIMIT)
3545		return -EINVAL;
3546
3547	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3548				  pgprot_device(PAGE_KERNEL));
3549#else
3550	/* this architecture does not have memory mapped I/O space,
3551	   so this function should never be called */
3552	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3553	return -ENODEV;
3554#endif
3555}
3556EXPORT_SYMBOL(pci_remap_iospace);
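
/*
 * Illustrative sketch, not part of the original file: a host bridge driver
 * maps its I/O window after registering the range.  "io_res" and "io_phys"
 * are hypothetical:
 *
 *	err = pci_remap_iospace(&io_res, io_phys);
 *	if (err)
 *		dev_warn(dev, "error %d mapping I/O range %pR\n", err, &io_res);
 */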
3557
3558/**
3559 *	pci_unmap_iospace - Unmap the memory mapped I/O space
3560 *	@res: resource to be unmapped
3561 *
3562 *	Unmap the CPU virtual address @res from virtual address space.
3563 *	Only architectures that have memory mapped IO functions defined
3564 *	(and the PCI_IOBASE value defined) should call this function.
3565 */
3566void pci_unmap_iospace(struct resource *res)
3567{
3568#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3569	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3570
3571	unmap_kernel_range(vaddr, resource_size(res));
3572#endif
3573}
3574EXPORT_SYMBOL(pci_unmap_iospace);
3575
3576/**
3577 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
3578 * @dev: Generic device to remap IO address for
3579 * @offset: Resource address to map
3580 * @size: Size of map
3581 *
3582 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
3583 * detach.
3584 */
3585void __iomem *devm_pci_remap_cfgspace(struct device *dev,
3586				      resource_size_t offset,
3587				      resource_size_t size)
3588{
3589	void __iomem **ptr, *addr;
3590
3591	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3592	if (!ptr)
3593		return NULL;
3594
3595	addr = pci_remap_cfgspace(offset, size);
3596	if (addr) {
3597		*ptr = addr;
3598		devres_add(dev, ptr);
3599	} else
3600		devres_free(ptr);
3601
3602	return addr;
3603}
3604EXPORT_SYMBOL(devm_pci_remap_cfgspace);
3605
3606/**
3607 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
3608 * @dev: generic device to handle the resource for
3609 * @res: configuration space resource to be handled
3610 *
3611 * Checks that a resource is a valid memory region, requests the memory
3612 * region and ioremaps with pci_remap_cfgspace() API that ensures the
3613 * proper PCI configuration space memory attributes are guaranteed.
3614 *
3615 * All operations are managed and will be undone on driver detach.
3616 *
3617 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
3618 * on failure. Usage example::
3619 *
3620 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3621 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
3622 *	if (IS_ERR(base))
3623 *		return PTR_ERR(base);
3624 */
3625void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
3626					  struct resource *res)
3627{
3628	resource_size_t size;
3629	const char *name;
3630	void __iomem *dest_ptr;
3631
3632	BUG_ON(!dev);
3633
3634	if (!res || resource_type(res) != IORESOURCE_MEM) {
3635		dev_err(dev, "invalid resource\n");
3636		return IOMEM_ERR_PTR(-EINVAL);
3637	}
3638
3639	size = resource_size(res);
3640	name = res->name ?: dev_name(dev);
3641
3642	if (!devm_request_mem_region(dev, res->start, size, name)) {
3643		dev_err(dev, "can't request region for resource %pR\n", res);
3644		return IOMEM_ERR_PTR(-EBUSY);
3645	}
3646
3647	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
3648	if (!dest_ptr) {
3649		dev_err(dev, "ioremap failed for resource %pR\n", res);
3650		devm_release_mem_region(dev, res->start, size);
3651		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
3652	}
3653
3654	return dest_ptr;
3655}
3656EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
3657
3658static void __pci_set_master(struct pci_dev *dev, bool enable)
3659{
3660	u16 old_cmd, cmd;
3661
3662	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3663	if (enable)
3664		cmd = old_cmd | PCI_COMMAND_MASTER;
3665	else
3666		cmd = old_cmd & ~PCI_COMMAND_MASTER;
3667	if (cmd != old_cmd) {
3668		pci_dbg(dev, "%s bus mastering\n",
3669			enable ? "enabling" : "disabling");
3670		pci_write_config_word(dev, PCI_COMMAND, cmd);
3671	}
3672	dev->is_busmaster = enable;
3673}
3674
3675/**
3676 * pcibios_setup - process "pci=" kernel boot arguments
3677 * @str: string used to pass in "pci=" kernel boot arguments
3678 *
3679 * Process kernel boot arguments.  This is the default implementation.
3680 * Architecture specific implementations can override this as necessary.
3681 */
3682char * __weak __init pcibios_setup(char *str)
3683{
3684	return str;
3685}
3686
3687/**
3688 * pcibios_set_master - enable PCI bus-mastering for device dev
3689 * @dev: the PCI device to enable
3690 *
3691 * Enables PCI bus-mastering for the device.  This is the default
3692 * implementation.  Architecture specific implementations can override
3693 * this if necessary.
3694 */
3695void __weak pcibios_set_master(struct pci_dev *dev)
3696{
3697	u8 lat;
3698
3699	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3700	if (pci_is_pcie(dev))
3701		return;
3702
3703	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3704	if (lat < 16)
3705		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3706	else if (lat > pcibios_max_latency)
3707		lat = pcibios_max_latency;
3708	else
3709		return;
3710
3711	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3712}
3713
3714/**
3715 * pci_set_master - enables bus-mastering for device dev
3716 * @dev: the PCI device to enable
3717 *
3718 * Enables bus-mastering on the device and calls pcibios_set_master()
3719 * to do the needed arch specific settings.
3720 */
3721void pci_set_master(struct pci_dev *dev)
3722{
3723	__pci_set_master(dev, true);
3724	pcibios_set_master(dev);
3725}
3726EXPORT_SYMBOL(pci_set_master);
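/*
 * Example (editor's sketch, not part of this file): a typical probe path
 * enables the device and then turns on bus mastering before setting up
 * any DMA.  The "foo" names are hypothetical.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	pci_set_master(pdev);	/* sets PCI_COMMAND_MASTER, runs arch hook */
	return 0;
}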
3727
3728/**
3729 * pci_clear_master - disables bus-mastering for device dev
3730 * @dev: the PCI device to disable
3731 */
3732void pci_clear_master(struct pci_dev *dev)
3733{
3734	__pci_set_master(dev, false);
3735}
3736EXPORT_SYMBOL(pci_clear_master);
3737
3738/**
3739 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3740 * @dev: the PCI device for which MWI is to be enabled
3741 *
3742 * Helper function for pci_set_mwi.
3743 * Originally copied from drivers/net/acenic.c.
3744 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3745 *
3746 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3747 */
3748int pci_set_cacheline_size(struct pci_dev *dev)
3749{
3750	u8 cacheline_size;
3751
3752	if (!pci_cache_line_size)
3753		return -EINVAL;
3754
3755	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3756	   equal to or a multiple of the right value. */
3757	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3758	if (cacheline_size >= pci_cache_line_size &&
3759	    (cacheline_size % pci_cache_line_size) == 0)
3760		return 0;
3761
3762	/* Write the correct value. */
3763	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3764	/* Read it back. */
3765	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3766	if (cacheline_size == pci_cache_line_size)
3767		return 0;
3768
3769	pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
3770		   pci_cache_line_size << 2);
3771
3772	return -EINVAL;
3773}
3774EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3775
3776/**
3777 * pci_set_mwi - enables memory-write-invalidate PCI transaction
3778 * @dev: the PCI device for which MWI is enabled
3779 *
3780 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3781 *
3782 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3783 */
3784int pci_set_mwi(struct pci_dev *dev)
3785{
3786#ifdef PCI_DISABLE_MWI
3787	return 0;
3788#else
3789	int rc;
3790	u16 cmd;
3791
3792	rc = pci_set_cacheline_size(dev);
3793	if (rc)
3794		return rc;
3795
3796	pci_read_config_word(dev, PCI_COMMAND, &cmd);
3797	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
3798		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
3799		cmd |= PCI_COMMAND_INVALIDATE;
3800		pci_write_config_word(dev, PCI_COMMAND, cmd);
3801	}
3802	return 0;
3803#endif
3804}
3805EXPORT_SYMBOL(pci_set_mwi);
3806
3807/**
3808 * pcim_set_mwi - a device-managed pci_set_mwi()
3809 * @dev: the PCI device for which MWI is enabled
3810 *
3811 * Managed pci_set_mwi().
3812 *
3813 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3814 */
3815int pcim_set_mwi(struct pci_dev *dev)
3816{
3817	struct pci_devres *dr;
3818
3819	dr = find_pci_dr(dev);
3820	if (!dr)
3821		return -ENOMEM;
3822
3823	dr->mwi = 1;
3824	return pci_set_mwi(dev);
3825}
3826EXPORT_SYMBOL(pcim_set_mwi);
3827
3828/**
3829 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
3830 * @dev: the PCI device for which MWI is enabled
3831 *
3832 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3833 * Callers are not required to check the return value.
3834 *
3835 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3836 */
3837int pci_try_set_mwi(struct pci_dev *dev)
3838{
3839#ifdef PCI_DISABLE_MWI
3840	return 0;
3841#else
3842	return pci_set_mwi(dev);
3843#endif
3844}
3845EXPORT_SYMBOL(pci_try_set_mwi);
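/*
 * Example (editor's sketch): because MWI is purely a performance hint,
 * a driver can use pci_try_set_mwi() and ignore the result entirely.
 */
static void foo_enable_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
}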
3846
3847/**
3848 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3849 * @dev: the PCI device to disable
3850 *
3851 * Disables PCI Memory-Write-Invalidate transaction on the device
3852 */
3853void pci_clear_mwi(struct pci_dev *dev)
3854{
3855#ifndef PCI_DISABLE_MWI
3856	u16 cmd;
3857
3858	pci_read_config_word(dev, PCI_COMMAND, &cmd);
3859	if (cmd & PCI_COMMAND_INVALIDATE) {
3860		cmd &= ~PCI_COMMAND_INVALIDATE;
3861		pci_write_config_word(dev, PCI_COMMAND, cmd);
3862	}
3863#endif
3864}
3865EXPORT_SYMBOL(pci_clear_mwi);
3866
3867/**
3868 * pci_intx - enables/disables PCI INTx for device dev
3869 * @pdev: the PCI device to operate on
3870 * @enable: boolean: whether to enable or disable PCI INTx
3871 *
3872 * Enables/disables PCI INTx for device dev
3873 */
3874void pci_intx(struct pci_dev *pdev, int enable)
3875{
3876	u16 pci_command, new;
3877
3878	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3879
3880	if (enable)
3881		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3882	else
3883		new = pci_command | PCI_COMMAND_INTX_DISABLE;
3884
3885	if (new != pci_command) {
3886		struct pci_devres *dr;
3887
3888		pci_write_config_word(pdev, PCI_COMMAND, new);
3889
3890		dr = find_pci_dr(pdev);
3891		if (dr && !dr->restore_intx) {
3892			dr->restore_intx = 1;
3893			dr->orig_intx = !enable;
3894		}
3895	}
3896}
3897EXPORT_SYMBOL_GPL(pci_intx);
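/*
 * Example (editor's sketch): briefly masking INTx around a device
 * reconfiguration.  For managed devices, pci_intx() records the original
 * state so it can be restored on driver detach.
 */
static void foo_reconfigure(struct pci_dev *pdev)
{
	pci_intx(pdev, 0);	/* set PCI_COMMAND_INTX_DISABLE */
	/* ... touch device registers ... */
	pci_intx(pdev, 1);	/* re-enable INTx delivery */
}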
3898
3899static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3900{
3901	struct pci_bus *bus = dev->bus;
3902	bool mask_updated = true;
3903	u32 cmd_status_dword;
3904	u16 origcmd, newcmd;
3905	unsigned long flags;
3906	bool irq_pending;
3907
3908	/*
3909	 * We do a single dword read to retrieve both command and status.
3910	 * Document assumptions that make this possible.
3911	 */
3912	BUILD_BUG_ON(PCI_COMMAND % 4);
3913	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3914
3915	raw_spin_lock_irqsave(&pci_lock, flags);
3916
3917	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3918
3919	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3920
3921	/*
3922	 * Check interrupt status register to see whether our device
3923	 * triggered the interrupt (when masking) or the next IRQ is
3924	 * already pending (when unmasking).
3925	 */
3926	if (mask != irq_pending) {
3927		mask_updated = false;
3928		goto done;
3929	}
3930
3931	origcmd = cmd_status_dword;
3932	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3933	if (mask)
3934		newcmd |= PCI_COMMAND_INTX_DISABLE;
3935	if (newcmd != origcmd)
3936		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3937
3938done:
3939	raw_spin_unlock_irqrestore(&pci_lock, flags);
3940
3941	return mask_updated;
3942}
3943
3944/**
3945 * pci_check_and_mask_intx - mask INTx on pending interrupt
3946 * @dev: the PCI device to operate on
3947 *
3948 * Check if the device dev has its INTx line asserted, mask it and
3949 * return true in that case. False is returned if no interrupt was
3950 * pending.
3951 */
3952bool pci_check_and_mask_intx(struct pci_dev *dev)
3953{
3954	return pci_check_and_set_intx_mask(dev, true);
3955}
3956EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3957
3958/**
3959 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
3960 * @dev: the PCI device to operate on
3961 *
3962 * Check if the device dev has its INTx line asserted, unmask it if not
3963 * and return true. False is returned and the mask remains active if
3964 * there was still an interrupt pending.
3965 */
3966bool pci_check_and_unmask_intx(struct pci_dev *dev)
3967{
3968	return pci_check_and_set_intx_mask(dev, false);
3969}
3970EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
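/*
 * Example (editor's sketch): a hardirq handler on a shared INTx line can
 * use pci_check_and_mask_intx() to decide whether its device raised the
 * interrupt, and a threaded handler can re-enable the line afterwards
 * with pci_check_and_unmask_intx().  The "foo" names are hypothetical.
 */
static irqreturn_t foo_hardirq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* not ours; line left untouched */

	return IRQ_WAKE_THREAD;		/* service it, then unmask */
}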
3971
3972/**
3973 * pci_wait_for_pending_transaction - waits for pending transaction
3974 * @dev: the PCI device to operate on
3975 *
3976 * Return 0 if transaction is pending, 1 otherwise.
3977 */
3978int pci_wait_for_pending_transaction(struct pci_dev *dev)
3979{
3980	if (!pci_is_pcie(dev))
3981		return 1;
3982
3983	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3984				    PCI_EXP_DEVSTA_TRPND);
3985}
3986EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3987
3988static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
3989{
3990	int delay = 1;
3991	u32 id;
3992
3993	/*
3994	 * After reset, the device should not silently discard config
3995	 * requests, but it may still indicate that it needs more time by
3996	 * responding to them with CRS completions.  The Root Port will
3997	 * generally synthesize ~0 data to complete the read (except when
3998	 * CRS SV is enabled and the read was for the Vendor ID; in that
3999	 * case it synthesizes 0x0001 data).
4000	 *
4001	 * Wait for the device to return a non-CRS completion.  Read the
4002	 * Command register instead of Vendor ID so we don't have to
4003	 * contend with the CRS SV value.
4004	 */
4005	pci_read_config_dword(dev, PCI_COMMAND, &id);
4006	while (id == ~0) {
4007		if (delay > timeout) {
4008			pci_warn(dev, "not ready %dms after %s; giving up\n",
4009				 delay - 1, reset_type);
4010			return -ENOTTY;
4011		}
4012
4013		if (delay > 1000)
4014			pci_info(dev, "not ready %dms after %s; waiting\n",
4015				 delay - 1, reset_type);
4016
4017		msleep(delay);
4018		delay *= 2;
4019		pci_read_config_dword(dev, PCI_COMMAND, &id);
4020	}
4021
4022	if (delay > 1000)
4023		pci_info(dev, "ready %dms after %s\n", delay - 1,
4024			 reset_type);
4025
4026	return 0;
4027}
4028
4029/**
4030 * pcie_has_flr - check if a device supports function level resets
4031 * @dev:	device to check
4032 *
4033 * Returns true if the device advertises support for PCIe function level
4034 * resets.
4035 */
4036static bool pcie_has_flr(struct pci_dev *dev)
4037{
4038	u32 cap;
4039
4040	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4041		return false;
4042
4043	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4044	return cap & PCI_EXP_DEVCAP_FLR;
4045}
4046
4047/**
4048 * pcie_flr - initiate a PCIe function level reset
4049 * @dev:	device to reset
4050 *
4051 * Initiate a function level reset on @dev.  The caller should ensure the
4052 * device supports FLR before calling this function, e.g. by using the
4053 * pcie_has_flr() helper.
4054 */
4055int pcie_flr(struct pci_dev *dev)
4056{
4057	if (!pci_wait_for_pending_transaction(dev))
4058		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4059
4060	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4061
4062	/*
4063	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4064	 * 100ms, but may silently discard requests while the FLR is in
4065	 * progress.  Wait 100ms before trying to access the device.
4066	 */
4067	msleep(100);
4068
4069	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4070}
4071EXPORT_SYMBOL_GPL(pcie_flr);
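/*
 * Example (taken from __pci_reset_function_locked() below): pcie_flr()
 * is only invoked once pcie_has_flr() has confirmed support:
 *
 *	if (pcie_has_flr(dev)) {
 *		rc = pcie_flr(dev);
 *		if (rc != -ENOTTY)
 *			return rc;
 *	}
 */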
4072
4073static int pci_af_flr(struct pci_dev *dev, int probe)
4074{
4075	int pos;
4076	u8 cap;
4077
4078	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4079	if (!pos)
4080		return -ENOTTY;
4081
4082	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4083		return -ENOTTY;
4084
4085	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4086	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4087		return -ENOTTY;
4088
4089	if (probe)
4090		return 0;
4091
4092	/*
4093	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4094	 * is used, so we use the control offset rather than status and shift
4095	 * the test bit to match.
4096	 */
4097	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4098				 PCI_AF_STATUS_TP << 8))
4099		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4100
4101	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4102
4103	/*
4104	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4105	 * updated 27 July 2006; a device must complete an FLR within
4106	 * 100ms, but may silently discard requests while the FLR is in
4107	 * progress.  Wait 100ms before trying to access the device.
4108	 */
4109	msleep(100);
4110
4111	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4112}
4113
4114/**
4115 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4116 * @dev: Device to reset.
4117 * @probe: If set, only check if the device can be reset this way.
4118 *
4119 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4120 * unset, it will be reinitialized internally when going from PCI_D3hot to
4121 * PCI_D0.  If that's the case and the device is not in a low-power state
4122 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4123 *
4124 * NOTE: This causes the caller to sleep for twice the device power transition
4125 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4126 * by default (i.e. unless the @dev's d3_delay field has a different value).
4127 * Moreover, only devices in D0 can be reset by this function.
4128 */
4129static int pci_pm_reset(struct pci_dev *dev, int probe)
4130{
4131	u16 csr;
4132
4133	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4134		return -ENOTTY;
4135
4136	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4137	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4138		return -ENOTTY;
4139
4140	if (probe)
4141		return 0;
4142
4143	if (dev->current_state != PCI_D0)
4144		return -EINVAL;
4145
4146	csr &= ~PCI_PM_CTRL_STATE_MASK;
4147	csr |= PCI_D3hot;
4148	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4149	pci_dev_d3_sleep(dev);
4150
4151	csr &= ~PCI_PM_CTRL_STATE_MASK;
4152	csr |= PCI_D0;
4153	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4154	pci_dev_d3_sleep(dev);
4155
4156	return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
4157}
4158
4159void pci_reset_secondary_bus(struct pci_dev *dev)
4160{
4161	u16 ctrl;
4162
4163	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4164	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4165	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4166
4167	/*
4168	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4169	 * this to 2ms to ensure that we meet the minimum requirement.
4170	 */
4171	msleep(2);
4172
4173	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4174	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4175
4176	/*
4177	 * Trhfa for conventional PCI is 2^25 clock cycles.
4178	 * Assuming a minimum 33MHz clock this results in a 1s
4179	 * delay before we can consider subordinate devices to
4180	 * be re-initialized.  PCIe has some ways to shorten this,
4181	 * but we don't make use of them yet.
4182	 */
4183	ssleep(1);
4184}
4185
4186void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4187{
4188	pci_reset_secondary_bus(dev);
4189}
4190
4191/**
4192 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
4193 * @dev: Bridge device
4194 *
4195 * Use the bridge control register to assert reset on the secondary bus.
4196 * Devices on the secondary bus are left in power-on state.
4197 */
4198int pci_reset_bridge_secondary_bus(struct pci_dev *dev)
4199{
4200	pcibios_reset_secondary_bus(dev);
4201
4202	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4203}
4204EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
4205
4206static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4207{
4208	struct pci_dev *pdev;
4209
4210	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4211	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4212		return -ENOTTY;
4213
4214	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4215		if (pdev != dev)
4216			return -ENOTTY;
4217
4218	if (probe)
4219		return 0;
4220
4221	pci_reset_bridge_secondary_bus(dev->bus->self);
4222
4223	return 0;
4224}
4225
4226static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4227{
4228	int rc = -ENOTTY;
4229
4230	if (!hotplug || !try_module_get(hotplug->ops->owner))
4231		return rc;
4232
4233	if (hotplug->ops->reset_slot)
4234		rc = hotplug->ops->reset_slot(hotplug, probe);
4235
4236	module_put(hotplug->ops->owner);
4237
4238	return rc;
4239}
4240
4241static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4242{
4243	struct pci_dev *pdev;
4244
4245	if (dev->subordinate || !dev->slot ||
4246	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4247		return -ENOTTY;
4248
4249	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4250		if (pdev != dev && pdev->slot == dev->slot)
4251			return -ENOTTY;
4252
4253	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4254}
4255
4256static void pci_dev_lock(struct pci_dev *dev)
4257{
4258	pci_cfg_access_lock(dev);
4259	/* block PM suspend, driver probe, etc. */
4260	device_lock(&dev->dev);
4261}
4262
4263/* Return 1 on successful lock, 0 on contention */
4264static int pci_dev_trylock(struct pci_dev *dev)
4265{
4266	if (pci_cfg_access_trylock(dev)) {
4267		if (device_trylock(&dev->dev))
4268			return 1;
4269		pci_cfg_access_unlock(dev);
4270	}
4271
4272	return 0;
4273}
4274
4275static void pci_dev_unlock(struct pci_dev *dev)
4276{
4277	device_unlock(&dev->dev);
4278	pci_cfg_access_unlock(dev);
4279}
4280
4281static void pci_dev_save_and_disable(struct pci_dev *dev)
4282{
4283	const struct pci_error_handlers *err_handler =
4284			dev->driver ? dev->driver->err_handler : NULL;
4285
4286	/*
4287	 * dev->driver->err_handler->reset_prepare() is protected against
4288	 * races with ->remove() by the device lock, which must be held by
4289	 * the caller.
4290	 */
4291	if (err_handler && err_handler->reset_prepare)
4292		err_handler->reset_prepare(dev);
4293
4294	/*
4295	 * Wake-up device prior to save.  PM registers default to D0 after
4296	 * reset and a simple register restore doesn't reliably return
4297	 * to a non-D0 state anyway.
4298	 */
4299	pci_set_power_state(dev, PCI_D0);
4300
4301	pci_save_state(dev);
4302	/*
4303	 * Disable the device by clearing the Command register, except for
4304	 * INTx-disable which is set.  This not only disables MMIO and I/O port
4305	 * BARs, but also prevents the device from being Bus Master, preventing
4306	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
4307	 * compliant devices, INTx-disable prevents legacy interrupts.
4308	 */
4309	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4310}
4311
4312static void pci_dev_restore(struct pci_dev *dev)
4313{
4314	const struct pci_error_handlers *err_handler =
4315			dev->driver ? dev->driver->err_handler : NULL;
4316
4317	pci_restore_state(dev);
4318
4319	/*
4320	 * dev->driver->err_handler->reset_done() is protected against
4321	 * races with ->remove() by the device lock, which must be held by
4322	 * the caller.
4323	 */
4324	if (err_handler && err_handler->reset_done)
4325		err_handler->reset_done(dev);
4326}
4327
4328/**
4329 * __pci_reset_function_locked - reset a PCI device function while holding
4330 * the @dev mutex lock.
4331 * @dev: PCI device to reset
4332 *
4333 * Some devices allow an individual function to be reset without affecting
4334 * other functions in the same device.  The PCI device must be responsive
4335 * to PCI config space in order to use this function.
4336 *
4337 * The device function is presumed to be unused and the caller is holding
4338 * the device mutex lock when this function is called.
4339 * Resetting the device will make the contents of PCI configuration space
4340 * random, so any caller of this must be prepared to reinitialise the
4341 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4342 * etc.
4343 *
4344 * Returns 0 if the device function was successfully reset or negative if the
4345 * device doesn't support resetting a single function.
4346 */
4347int __pci_reset_function_locked(struct pci_dev *dev)
4348{
4349	int rc;
4350
4351	might_sleep();
4352
4353	/*
4354	 * A reset method returns -ENOTTY if it doesn't support this device
4355	 * and we should try the next method.
4356	 *
4357	 * If it returns 0 (success), we're finished.  If it returns any
4358	 * other error, we're also finished: this indicates that further
4359	 * reset mechanisms might be broken on the device.
4360	 */
4361	rc = pci_dev_specific_reset(dev, 0);
4362	if (rc != -ENOTTY)
4363		return rc;
4364	if (pcie_has_flr(dev)) {
4365		rc = pcie_flr(dev);
4366		if (rc != -ENOTTY)
4367			return rc;
4368	}
4369	rc = pci_af_flr(dev, 0);
4370	if (rc != -ENOTTY)
4371		return rc;
4372	rc = pci_pm_reset(dev, 0);
4373	if (rc != -ENOTTY)
4374		return rc;
4375	rc = pci_dev_reset_slot_function(dev, 0);
4376	if (rc != -ENOTTY)
4377		return rc;
4378	return pci_parent_bus_reset(dev, 0);
4379}
4380EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4381
4382/**
4383 * pci_probe_reset_function - check whether the device can be safely reset
4384 * @dev: PCI device to reset
4385 *
4386 * Some devices allow an individual function to be reset without affecting
4387 * other functions in the same device.  The PCI device must be responsive
4388 * to PCI config space in order to use this function.
4389 *
4390 * Returns 0 if the device function can be reset or negative if the
4391 * device doesn't support resetting a single function.
4392 */
4393int pci_probe_reset_function(struct pci_dev *dev)
4394{
4395	int rc;
4396
4397	might_sleep();
4398
4399	rc = pci_dev_specific_reset(dev, 1);
4400	if (rc != -ENOTTY)
4401		return rc;
4402	if (pcie_has_flr(dev))
4403		return 0;
4404	rc = pci_af_flr(dev, 1);
4405	if (rc != -ENOTTY)
4406		return rc;
4407	rc = pci_pm_reset(dev, 1);
4408	if (rc != -ENOTTY)
4409		return rc;
4410	rc = pci_dev_reset_slot_function(dev, 1);
4411	if (rc != -ENOTTY)
4412		return rc;
4413
4414	return pci_parent_bus_reset(dev, 1);
4415}
4416
4417/**
4418 * pci_reset_function - quiesce and reset a PCI device function
4419 * @dev: PCI device to reset
4420 *
4421 * Some devices allow an individual function to be reset without affecting
4422 * other functions in the same device.  The PCI device must be responsive
4423 * to PCI config space in order to use this function.
4424 *
4425 * This function does not just reset the PCI portion of a device, but
4426 * clears all the state associated with the device.  This function differs
4427 * from __pci_reset_function_locked() in that it saves and restores device state
4428 * over the reset and takes the PCI device lock.
4429 *
4430 * Returns 0 if the device function was successfully reset or negative if the
4431 * device doesn't support resetting a single function.
4432 */
4433int pci_reset_function(struct pci_dev *dev)
4434{
4435	int rc;
4436
4437	if (!dev->reset_fn)
4438		return -ENOTTY;
4439
4440	pci_dev_lock(dev);
4441	pci_dev_save_and_disable(dev);
4442
4443	rc = __pci_reset_function_locked(dev);
4444
4445	pci_dev_restore(dev);
4446	pci_dev_unlock(dev);
4447
4448	return rc;
4449}
4450EXPORT_SYMBOL_GPL(pci_reset_function);
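/*
 * Example (editor's sketch): a caller that owns the device quiesces and
 * resets it, treating -ENOTTY as "no reset method available".
 */
static int foo_reset(struct pci_dev *pdev)
{
	int rc = pci_reset_function(pdev);

	if (rc == -ENOTTY)
		pci_info(pdev, "no usable function reset method\n");
	return rc;
}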
4451
4452/**
4453 * pci_reset_function_locked - quiesce and reset a PCI device function
4454 * @dev: PCI device to reset
4455 *
4456 * Some devices allow an individual function to be reset without affecting
4457 * other functions in the same device.  The PCI device must be responsive
4458 * to PCI config space in order to use this function.
4459 *
4460 * This function does not just reset the PCI portion of a device, but
4461 * clears all the state associated with the device.  This function differs
4462 * from __pci_reset_function_locked() in that it saves and restores device state
4463 * over the reset.  It also differs from pci_reset_function() in that it
4464 * requires the PCI device lock to be held.
4465 *
4466 * Returns 0 if the device function was successfully reset or negative if the
4467 * device doesn't support resetting a single function.
4468 */
4469int pci_reset_function_locked(struct pci_dev *dev)
4470{
4471	int rc;
4472
4473	if (!dev->reset_fn)
4474		return -ENOTTY;
4475
4476	pci_dev_save_and_disable(dev);
4477
4478	rc = __pci_reset_function_locked(dev);
4479
4480	pci_dev_restore(dev);
4481
4482	return rc;
4483}
4484EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4485
4486/**
4487 * pci_try_reset_function - quiesce and reset a PCI device function
4488 * @dev: PCI device to reset
4489 *
4490 * Same as above, except return -EAGAIN if unable to lock device.
4491 */
4492int pci_try_reset_function(struct pci_dev *dev)
4493{
4494	int rc;
4495
4496	if (!dev->reset_fn)
4497		return -ENOTTY;
4498
4499	if (!pci_dev_trylock(dev))
4500		return -EAGAIN;
4501
4502	pci_dev_save_and_disable(dev);
4503	rc = __pci_reset_function_locked(dev);
4504	pci_dev_restore(dev);
4505	pci_dev_unlock(dev);
4506
4507	return rc;
4508}
4509EXPORT_SYMBOL_GPL(pci_try_reset_function);
4510
4511/* Do any devices on or below this bus prevent a bus reset? */
4512static bool pci_bus_resetable(struct pci_bus *bus)
4513{
4514	struct pci_dev *dev;
4515
4517	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
4518		return false;
4519
4520	list_for_each_entry(dev, &bus->devices, bus_list) {
4521		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4522		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4523			return false;
4524	}
4525
4526	return true;
4527}
4528
4529/* Lock devices from the top of the tree down */
4530static void pci_bus_lock(struct pci_bus *bus)
4531{
4532	struct pci_dev *dev;
4533
4534	list_for_each_entry(dev, &bus->devices, bus_list) {
4535		pci_dev_lock(dev);
4536		if (dev->subordinate)
4537			pci_bus_lock(dev->subordinate);
4538	}
4539}
4540
4541/* Unlock devices from the bottom of the tree up */
4542static void pci_bus_unlock(struct pci_bus *bus)
4543{
4544	struct pci_dev *dev;
4545
4546	list_for_each_entry(dev, &bus->devices, bus_list) {
4547		if (dev->subordinate)
4548			pci_bus_unlock(dev->subordinate);
4549		pci_dev_unlock(dev);
4550	}
4551}
4552
4553/* Return 1 on successful lock, 0 on contention */
4554static int pci_bus_trylock(struct pci_bus *bus)
4555{
4556	struct pci_dev *dev;
4557
4558	list_for_each_entry(dev, &bus->devices, bus_list) {
4559		if (!pci_dev_trylock(dev))
4560			goto unlock;
4561		if (dev->subordinate) {
4562			if (!pci_bus_trylock(dev->subordinate)) {
4563				pci_dev_unlock(dev);
4564				goto unlock;
4565			}
4566		}
4567	}
4568	return 1;
4569
4570unlock:
4571	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4572		if (dev->subordinate)
4573			pci_bus_unlock(dev->subordinate);
4574		pci_dev_unlock(dev);
4575	}
4576	return 0;
4577}
4578
4579/* Do any devices on or below this slot prevent a bus reset? */
4580static bool pci_slot_resetable(struct pci_slot *slot)
4581{
4582	struct pci_dev *dev;
4583
4584	if (slot->bus->self &&
4585	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
4586		return false;
4587
4588	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4589		if (!dev->slot || dev->slot != slot)
4590			continue;
4591		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4592		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4593			return false;
4594	}
4595
4596	return true;
4597}
4598
4599/* Lock devices from the top of the tree down */
4600static void pci_slot_lock(struct pci_slot *slot)
4601{
4602	struct pci_dev *dev;
4603
4604	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4605		if (!dev->slot || dev->slot != slot)
4606			continue;
4607		pci_dev_lock(dev);
4608		if (dev->subordinate)
4609			pci_bus_lock(dev->subordinate);
4610	}
4611}
4612
4613/* Unlock devices from the bottom of the tree up */
4614static void pci_slot_unlock(struct pci_slot *slot)
4615{
4616	struct pci_dev *dev;
4617
4618	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4619		if (!dev->slot || dev->slot != slot)
4620			continue;
4621		if (dev->subordinate)
4622			pci_bus_unlock(dev->subordinate);
4623		pci_dev_unlock(dev);
4624	}
4625}
4626
4627/* Return 1 on successful lock, 0 on contention */
4628static int pci_slot_trylock(struct pci_slot *slot)
4629{
4630	struct pci_dev *dev;
4631
4632	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4633		if (!dev->slot || dev->slot != slot)
4634			continue;
4635		if (!pci_dev_trylock(dev))
4636			goto unlock;
4637		if (dev->subordinate) {
4638			if (!pci_bus_trylock(dev->subordinate)) {
4639				pci_dev_unlock(dev);
4640				goto unlock;
4641			}
4642		}
4643	}
4644	return 1;
4645
4646unlock:
4647	list_for_each_entry_continue_reverse(dev,
4648					     &slot->bus->devices, bus_list) {
4649		if (!dev->slot || dev->slot != slot)
4650			continue;
4651		if (dev->subordinate)
4652			pci_bus_unlock(dev->subordinate);
4653		pci_dev_unlock(dev);
4654	}
4655	return 0;
4656}
4657
4658/* Save and disable devices from the top of the tree down */
4659static void pci_bus_save_and_disable(struct pci_bus *bus)
4660{
4661	struct pci_dev *dev;
4662
4663	list_for_each_entry(dev, &bus->devices, bus_list) {
4664		pci_dev_lock(dev);
4665		pci_dev_save_and_disable(dev);
4666		pci_dev_unlock(dev);
4667		if (dev->subordinate)
4668			pci_bus_save_and_disable(dev->subordinate);
4669	}
4670}
4671
4672/*
4673 * Restore devices from top of the tree down - parent bridges need to be
4674 * restored before we can get to subordinate devices.
4675 */
4676static void pci_bus_restore(struct pci_bus *bus)
4677{
4678	struct pci_dev *dev;
4679
4680	list_for_each_entry(dev, &bus->devices, bus_list) {
4681		pci_dev_lock(dev);
4682		pci_dev_restore(dev);
4683		pci_dev_unlock(dev);
4684		if (dev->subordinate)
4685			pci_bus_restore(dev->subordinate);
4686	}
4687}
4688
4689/* Save and disable devices from the top of the tree down */
4690static void pci_slot_save_and_disable(struct pci_slot *slot)
4691{
4692	struct pci_dev *dev;
4693
4694	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4695		if (!dev->slot || dev->slot != slot)
4696			continue;
4697		pci_dev_save_and_disable(dev);
4698		if (dev->subordinate)
4699			pci_bus_save_and_disable(dev->subordinate);
4700	}
4701}
4702
4703/*
4704 * Restore devices from top of the tree down - parent bridges need to be
4705 * restored before we can get to subordinate devices.
4706 */
4707static void pci_slot_restore(struct pci_slot *slot)
4708{
4709	struct pci_dev *dev;
4710
4711	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4712		if (!dev->slot || dev->slot != slot)
4713			continue;
4714		pci_dev_lock(dev);
4715		pci_dev_restore(dev);
4716		pci_dev_unlock(dev);
4717		if (dev->subordinate)
4718			pci_bus_restore(dev->subordinate);
4719	}
4720}
4721
4722static int pci_slot_reset(struct pci_slot *slot, int probe)
4723{
4724	int rc;
4725
4726	if (!slot || !pci_slot_resetable(slot))
4727		return -ENOTTY;
4728
4729	if (!probe)
4730		pci_slot_lock(slot);
4731
4732	might_sleep();
4733
4734	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4735
4736	if (!probe)
4737		pci_slot_unlock(slot);
4738
4739	return rc;
4740}
4741
4742/**
4743 * pci_probe_reset_slot - probe whether a PCI slot can be reset
4744 * @slot: PCI slot to probe
4745 *
4746 * Return 0 if slot can be reset, negative if a slot reset is not supported.
4747 */
4748int pci_probe_reset_slot(struct pci_slot *slot)
4749{
4750	return pci_slot_reset(slot, 1);
4751}
4752EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4753
4754/**
4755 * pci_reset_slot - reset a PCI slot
4756 * @slot: PCI slot to reset
4757 *
4758 * A PCI bus may host multiple slots, each slot may support a reset mechanism
4759 * independent of other slots.  For instance, some slots may support slot power
4760 * control.  In the case of a 1:1 bus to slot architecture, this function may
4761 * wrap the bus reset to avoid spurious slot related events such as hotplug.
4762 * Generally a slot reset should be attempted before a bus reset.  All of the
4763 * functions of the slot and any subordinate buses behind the slot are reset
4764 * through this function.  PCI config space of all devices in the slot and
4765 * behind the slot is saved before and restored after reset.
4766 *
4767 * Return 0 on success, non-zero on error.
4768 */
4769int pci_reset_slot(struct pci_slot *slot)
4770{
4771	int rc;
4772
4773	rc = pci_slot_reset(slot, 1);
4774	if (rc)
4775		return rc;
4776
4777	pci_slot_save_and_disable(slot);
4778
4779	rc = pci_slot_reset(slot, 0);
4780
4781	pci_slot_restore(slot);
4782
4783	return rc;
4784}
4785EXPORT_SYMBOL_GPL(pci_reset_slot);
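/*
 * Example (editor's sketch): probing first lets a caller check support
 * before attempting a slot reset; the "foo" name is hypothetical.
 */
static int foo_slot_reset(struct pci_slot *slot)
{
	if (pci_probe_reset_slot(slot))
		return -ENOTTY;		/* no slot reset support */

	return pci_reset_slot(slot);	/* config space saved/restored */
}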
4786
4787/**
4788 * pci_try_reset_slot - Try to reset a PCI slot
4789 * @slot: PCI slot to reset
4790 *
4791 * Same as above except return -EAGAIN if the slot cannot be locked
4792 */
4793int pci_try_reset_slot(struct pci_slot *slot)
4794{
4795	int rc;
4796
4797	rc = pci_slot_reset(slot, 1);
4798	if (rc)
4799		return rc;
4800
4801	pci_slot_save_and_disable(slot);
4802
4803	if (pci_slot_trylock(slot)) {
4804		might_sleep();
4805		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4806		pci_slot_unlock(slot);
4807	} else
4808		rc = -EAGAIN;
4809
4810	pci_slot_restore(slot);
4811
4812	return rc;
4813}
4814EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4815
4816static int pci_bus_reset(struct pci_bus *bus, int probe)
4817{
4818	if (!bus->self || !pci_bus_resetable(bus))
4819		return -ENOTTY;
4820
4821	if (probe)
4822		return 0;
4823
4824	pci_bus_lock(bus);
4825
4826	might_sleep();
4827
4828	pci_reset_bridge_secondary_bus(bus->self);
4829
4830	pci_bus_unlock(bus);
4831
4832	return 0;
4833}
4834
4835/**
4836 * pci_probe_reset_bus - probe whether a PCI bus can be reset
4837 * @bus: PCI bus to probe
4838 *
4839 * Return 0 if bus can be reset, negative if a bus reset is not supported.
4840 */
4841int pci_probe_reset_bus(struct pci_bus *bus)
4842{
4843	return pci_bus_reset(bus, 1);
4844}
4845EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4846
4847/**
4848 * pci_reset_bus - reset a PCI bus
4849 * @bus: top level PCI bus to reset
4850 *
4851 * Do a bus reset on the given bus and any subordinate buses, saving
4852 * and restoring state of all devices.
4853 *
4854 * Return 0 on success, non-zero on error.
4855 */
4856int pci_reset_bus(struct pci_bus *bus)
4857{
4858	int rc;
4859
4860	rc = pci_bus_reset(bus, 1);
4861	if (rc)
4862		return rc;
4863
4864	pci_bus_save_and_disable(bus);
4865
4866	rc = pci_bus_reset(bus, 0);
4867
4868	pci_bus_restore(bus);
4869
4870	return rc;
4871}
4872EXPORT_SYMBOL_GPL(pci_reset_bus);
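/*
 * Example (editor's sketch): the same pattern for a whole bus; the
 * pci_try_* variant below returns -EAGAIN instead of blocking when the
 * devices cannot all be locked.
 */
static int foo_bus_reset(struct pci_bus *bus)
{
	if (pci_probe_reset_bus(bus))
		return -ENOTTY;

	return pci_reset_bus(bus);
}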
4873
4874/**
4875 * pci_try_reset_bus - Try to reset a PCI bus
4876 * @bus: top level PCI bus to reset
4877 *
4878 * Same as above except return -EAGAIN if the bus cannot be locked
4879 */
4880int pci_try_reset_bus(struct pci_bus *bus)
4881{
4882	int rc;
4883
4884	rc = pci_bus_reset(bus, 1);
4885	if (rc)
4886		return rc;
4887
4888	pci_bus_save_and_disable(bus);
4889
4890	if (pci_bus_trylock(bus)) {
4891		might_sleep();
4892		pci_reset_bridge_secondary_bus(bus->self);
4893		pci_bus_unlock(bus);
4894	} else
4895		rc = -EAGAIN;
4896
4897	pci_bus_restore(bus);
4898
4899	return rc;
4900}
4901EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4902
4903/**
4904 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4905 * @dev: PCI device to query
4906 *
4907 * Returns mmrbc: maximum designed memory read count in bytes
4908 *    or appropriate error value.
4909 */
4910int pcix_get_max_mmrbc(struct pci_dev *dev)
4911{
4912	int cap;
4913	u32 stat;
4914
4915	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4916	if (!cap)
4917		return -EINVAL;
4918
4919	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4920		return -EINVAL;
4921
4922	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
4923}
4924EXPORT_SYMBOL(pcix_get_max_mmrbc);
4925
4926/**
4927 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4928 * @dev: PCI device to query
4929 *
4930 * Returns mmrbc: maximum memory read count in bytes
4931 *    or appropriate error value.
4932 */
4933int pcix_get_mmrbc(struct pci_dev *dev)
4934{
4935	int cap;
4936	u16 cmd;
4937
4938	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4939	if (!cap)
4940		return -EINVAL;
4941
4942	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4943		return -EINVAL;
4944
4945	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
4946}
4947EXPORT_SYMBOL(pcix_get_mmrbc);
4948
4949/**
4950 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4951 * @dev: PCI device to query
4952 * @mmrbc: maximum memory read count in bytes
4953 *    valid values are 512, 1024, 2048, 4096
4954 *
4955 * If possible sets maximum memory read byte count; some bridges have
4956 * errata that prevent this.
4957 */
4958int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4959{
4960	int cap;
4961	u32 stat, v, o;
4962	u16 cmd;
4963
4964	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
4965		return -EINVAL;
4966
4967	v = ffs(mmrbc) - 10;
4968
4969	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4970	if (!cap)
4971		return -EINVAL;
4972
4973	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
4974		return -EINVAL;
4975
4976	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
4977		return -E2BIG;
4978
4979	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4980		return -EINVAL;
4981
4982	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
4983	if (o != v) {
4984		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
4985			return -EIO;
4986
4987		cmd &= ~PCI_X_CMD_MAX_READ;
4988		cmd |= v << 2;
4989		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
4990			return -EIO;
4991	}
4992	return 0;
4993}
4994EXPORT_SYMBOL(pcix_set_mmrbc);
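/*
 * Example (editor's sketch): raising a PCI-X device's memory read byte
 * count to the maximum it was designed for.
 */
static int foo_tune_mmrbc(struct pci_dev *pdev)
{
	int max = pcix_get_max_mmrbc(pdev);

	if (max < 0)
		return max;	/* not PCI-X, or config read failed */

	return pcix_set_mmrbc(pdev, max);
}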
4995
4996/**
4997 * pcie_get_readrq - get PCI Express read request size
4998 * @dev: PCI device to query
4999 *
5000 * Returns maximum memory read request in bytes
5001 *    or appropriate error value.
5002 */
5003int pcie_get_readrq(struct pci_dev *dev)
5004{
5005	u16 ctl;
5006
5007	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5008
5009	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5010}
5011EXPORT_SYMBOL(pcie_get_readrq);
5012
5013/**
5014 * pcie_set_readrq - set PCI Express maximum memory read request
5015 * @dev: PCI device to query
5016 * @rq: maximum memory read count in bytes
5017 *    valid values are 128, 256, 512, 1024, 2048, 4096
5018 *
5019 * If possible sets maximum memory read request in bytes
5020 */
5021int pcie_set_readrq(struct pci_dev *dev, int rq)
5022{
5023	u16 v;
5024
5025	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5026		return -EINVAL;
5027
5028	/*
5029	 * If using the "performance" PCIe config, we clamp the
5030	 * read rq size to the max packet size to prevent the
5031	 * host bridge from generating requests larger than we can
5032	 * cope with.
5033	 */
5034	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5035		int mps = pcie_get_mps(dev);
5036
5037		if (mps < rq)
5038			rq = mps;
5039	}
5040
5041	v = (ffs(rq) - 8) << 12;
5042
5043	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5044						  PCI_EXP_DEVCTL_READRQ, v);
5045}
5046EXPORT_SYMBOL(pcie_set_readrq);
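/*
 * Example (editor's sketch): capping the read request size; the 512 byte
 * figure is a hypothetical tuning value, not a recommendation.
 */
static void foo_cap_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) > 512)
		pcie_set_readrq(pdev, 512);
}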
5047
5048/**
5049 * pcie_get_mps - get PCI Express maximum payload size
5050 * @dev: PCI device to query
5051 *
5052 * Returns maximum payload size in bytes
5053 */
5054int pcie_get_mps(struct pci_dev *dev)
5055{
5056	u16 ctl;
5057
5058	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5059
5060	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5061}
5062EXPORT_SYMBOL(pcie_get_mps);
5063
5064/**
5065 * pcie_set_mps - set PCI Express maximum payload size
5066 * @dev: PCI device to query
5067 * @mps: maximum payload size in bytes
5068 *    valid values are 128, 256, 512, 1024, 2048, 4096
5069 *
5070 * If possible sets maximum payload size
5071 */
5072int pcie_set_mps(struct pci_dev *dev, int mps)
5073{
5074	u16 v;
5075
5076	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5077		return -EINVAL;
5078
5079	v = ffs(mps) - 8;
5080	if (v > dev->pcie_mpss)
5081		return -EINVAL;
5082	v <<= 5;
5083
5084	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5085						  PCI_EXP_DEVCTL_PAYLOAD, v);
5086}
5087EXPORT_SYMBOL(pcie_set_mps);
5088
5089/**
5090 * pcie_get_minimum_link - determine minimum link settings of a PCI device
5091 * @dev: PCI device to query
5092 * @speed: storage for minimum speed
5093 * @width: storage for minimum width
5094 *
5095 * This function will walk up the PCI device chain and determine the minimum
5096 * link width and speed of the device.
5097 */
5098int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
5099			  enum pcie_link_width *width)
5100{
5101	int ret;
5102
5103	*speed = PCI_SPEED_UNKNOWN;
5104	*width = PCIE_LNK_WIDTH_UNKNOWN;
5105
5106	while (dev) {
5107		u16 lnksta;
5108		enum pci_bus_speed next_speed;
5109		enum pcie_link_width next_width;
5110
5111		ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5112		if (ret)
5113			return ret;
5114
5115		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5116		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5117			PCI_EXP_LNKSTA_NLW_SHIFT;
5118
5119		if (next_speed < *speed)
5120			*speed = next_speed;
5121
5122		if (next_width < *width)
5123			*width = next_width;
5124
5125		dev = dev->bus->self;
5126	}
5127
5128	return 0;
5129}
5130EXPORT_SYMBOL(pcie_get_minimum_link);
5131
5132/**
5133 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5134 *			      device and its bandwidth limitation
5135 * @dev: PCI device to query
5136 * @limiting_dev: storage for device causing the bandwidth limitation
5137 * @speed: storage for speed of limiting device
5138 * @width: storage for width of limiting device
5139 *
5140 * Walk up the PCI device chain and find the point where the minimum
5141 * bandwidth is available.  Return the bandwidth available there and (if
5142 * limiting_dev, speed, and width pointers are supplied) information about
5143 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5144 * raw bandwidth.
5145 */
5146u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5147			     enum pci_bus_speed *speed,
5148			     enum pcie_link_width *width)
5149{
5150	u16 lnksta;
5151	enum pci_bus_speed next_speed;
5152	enum pcie_link_width next_width;
5153	u32 bw, next_bw;
5154
5155	if (speed)
5156		*speed = PCI_SPEED_UNKNOWN;
5157	if (width)
5158		*width = PCIE_LNK_WIDTH_UNKNOWN;
5159
5160	bw = 0;
5161
5162	while (dev) {
5163		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5164
5165		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5166		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5167			PCI_EXP_LNKSTA_NLW_SHIFT;
5168
5169		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5170
5171		/* Check if current device limits the total bandwidth */
5172		if (!bw || next_bw <= bw) {
5173			bw = next_bw;
5174
5175			if (limiting_dev)
5176				*limiting_dev = dev;
5177			if (speed)
5178				*speed = next_speed;
5179			if (width)
5180				*width = next_width;
5181		}
5182
5183		dev = pci_upstream_bridge(dev);
5184	}
5185
5186	return bw;
5187}
5188EXPORT_SYMBOL(pcie_bandwidth_available);
5189
5190/**
5191 * pcie_get_speed_cap - query for the PCI device's link speed capability
5192 * @dev: PCI device to query
5193 *
5194 * Query the PCI device speed capability.  Return the maximum link speed
5195 * supported by the device.
5196 */
5197enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5198{
5199	u32 lnkcap2, lnkcap;
5200
5201	/*
5202	 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
5203	 * Speeds Vector in Link Capabilities 2 when supported, falling
5204	 * back to Max Link Speed in Link Capabilities otherwise.
5205	 */
5206	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5207	if (lnkcap2) { /* PCIe r3.0-compliant */
5208		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
5209			return PCIE_SPEED_16_0GT;
5210		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5211			return PCIE_SPEED_8_0GT;
5212		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5213			return PCIE_SPEED_5_0GT;
5214		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5215			return PCIE_SPEED_2_5GT;
5216		return PCI_SPEED_UNKNOWN;
5217	}
5218
5219	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5220	if (lnkcap) {
5221		if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
5222			return PCIE_SPEED_16_0GT;
5223		else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
5224			return PCIE_SPEED_8_0GT;
5225		else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
5226			return PCIE_SPEED_5_0GT;
5227		else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
5228			return PCIE_SPEED_2_5GT;
5229	}
5230
5231	return PCI_SPEED_UNKNOWN;
5232}
5233
5234/**
5235 * pcie_get_width_cap - query for the PCI device's link width capability
5236 * @dev: PCI device to query
5237 *
5238 * Query the PCI device width capability.  Return the maximum link width
5239 * supported by the device.
5240 */
5241enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5242{
5243	u32 lnkcap;
5244
5245	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5246	if (lnkcap)
5247		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5248
5249	return PCIE_LNK_WIDTH_UNKNOWN;
5250}
5251
5252/**
5253 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5254 * @dev: PCI device
5255 * @speed: storage for link speed
5256 * @width: storage for link width
5257 *
5258 * Calculate a PCI device's link bandwidth by querying for its link speed
5259 * and width, multiplying them, and applying encoding overhead.  The result
5260 * is in Mb/s, i.e., megabits/second of raw bandwidth.
5261 */
5262u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5263			   enum pcie_link_width *width)
5264{
5265	*speed = pcie_get_speed_cap(dev);
5266	*width = pcie_get_width_cap(dev);
5267
5268	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5269		return 0;
5270
5271	return *width * PCIE_SPEED2MBS_ENC(*speed);
5272}
5273
5274/**
5275 * pcie_print_link_status - Report the PCI device's link speed and width
5276 * @dev: PCI device to query
5277 *
5278 * Report the available bandwidth at the device.  If this is less than the
5279 * device is capable of, report the device's maximum possible bandwidth and
5280 * the upstream link that limits its performance to less than that.
5281 */
5282void pcie_print_link_status(struct pci_dev *dev)
5283{
5284	enum pcie_link_width width, width_cap;
5285	enum pci_bus_speed speed, speed_cap;
5286	struct pci_dev *limiting_dev = NULL;
5287	u32 bw_avail, bw_cap;
5288
5289	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5290	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5291
5292	if (bw_avail >= bw_cap)
5293		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5294			 bw_cap / 1000, bw_cap % 1000,
5295			 PCIE_SPEED2STR(speed_cap), width_cap);
5296	else
5297		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5298			 bw_avail / 1000, bw_avail % 1000,
5299			 PCIE_SPEED2STR(speed), width,
5300			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5301			 bw_cap / 1000, bw_cap % 1000,
5302			 PCIE_SPEED2STR(speed_cap), width_cap);
5303}
5304EXPORT_SYMBOL(pcie_print_link_status);
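/*
 * Example (editor's sketch): a bandwidth-hungry driver can log the
 * negotiated link once at probe time so a degraded link shows up in
 * the kernel log:
 *
 *	pcie_print_link_status(pdev);
 */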
5305
5306/**
5307 * pci_select_bars - Make BAR mask from the type of resource
5308 * @dev: the PCI device for which BAR mask is made
5309 * @flags: resource type mask to be selected
5310 *
5311 * This helper routine makes a BAR mask from the type of resource.
5312 */
5313int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5314{
5315	int i, bars = 0;
5316	for (i = 0; i < PCI_NUM_RESOURCES; i++)
5317		if (pci_resource_flags(dev, i) & flags)
5318			bars |= (1 << i);
5319	return bars;
5320}
5321EXPORT_SYMBOL(pci_select_bars);
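/*
 * Example (editor's sketch): requesting only the memory BARs of a device,
 * a common companion to pci_request_selected_regions().  The "foo" name
 * is hypothetical.
 */
static int foo_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "foo");
}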
5322
5323/* Some architectures require additional programming to enable VGA */
5324static arch_set_vga_state_t arch_set_vga_state;
5325
5326void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5327{
5328	arch_set_vga_state = func;	/* NULL disables */
5329}
5330
5331static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5332				  unsigned int command_bits, u32 flags)
5333{
5334	if (arch_set_vga_state)
5335		return arch_set_vga_state(dev, decode, command_bits,
5336						flags);
5337	return 0;
5338}
5339
5340/**
5341 * pci_set_vga_state - set VGA decode state on device and parents if requested
5342 * @dev: the PCI device
5343 * @decode: true = enable decoding, false = disable decoding
5344 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
5345 * @flags: traverse ancestors and change bridges
5346 *	(CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE)
5347 */
5348int pci_set_vga_state(struct pci_dev *dev, bool decode,
5349		      unsigned int command_bits, u32 flags)
5350{
5351	struct pci_bus *bus;
5352	struct pci_dev *bridge;
5353	u16 cmd;
5354	int rc;
5355
5356	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
5357
5358	/* ARCH specific VGA enables */
5359	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5360	if (rc)
5361		return rc;
5362
5363	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5364		pci_read_config_word(dev, PCI_COMMAND, &cmd);
5365		if (decode)
5366			cmd |= command_bits;
5367		else
5368			cmd &= ~command_bits;
5369		pci_write_config_word(dev, PCI_COMMAND, cmd);
5370	}
5371
5372	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5373		return 0;
5374
5375	bus = dev->bus;
5376	while (bus) {
5377		bridge = bus->self;
5378		if (bridge) {
5379			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5380					     &cmd);
5381			if (decode)
5382				cmd |= PCI_BRIDGE_CTL_VGA;
5383			else
5384				cmd &= ~PCI_BRIDGE_CTL_VGA;
5385			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5386					      cmd);
5387		}
5388		bus = bus->parent;
5389	}
5390	return 0;
5391}
5392
5393/**
5394 * pci_add_dma_alias - Add a DMA devfn alias for a device
5395 * @dev: the PCI device for which alias is added
5396 * @devfn: alias slot and function
5397 *
5398 * This helper encodes 8-bit devfn as bit number in dma_alias_mask.
5399 * It should be called early, preferably as PCI fixup header quirk.
5400 */
5401void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5402{
5403	if (!dev->dma_alias_mask)
5404		dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5405					      sizeof(long), GFP_KERNEL);
5406	if (!dev->dma_alias_mask) {
5407		pci_warn(dev, "Unable to allocate DMA alias mask\n");
5408		return;
5409	}
5410
5411	set_bit(devfn, dev->dma_alias_mask);
5412	pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
5413		 PCI_SLOT(devfn), PCI_FUNC(devfn));
5414}
5415
5416bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5417{
5418	return (dev1->dma_alias_mask &&
5419		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5420	       (dev2->dma_alias_mask &&
5421		test_bit(dev1->devfn, dev2->dma_alias_mask));
5422}
5423
5424bool pci_device_is_present(struct pci_dev *pdev)
5425{
5426	u32 v;
5427
5428	if (pci_dev_is_disconnected(pdev))
5429		return false;
5430	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5431}
5432EXPORT_SYMBOL_GPL(pci_device_is_present);
5433
5434void pci_ignore_hotplug(struct pci_dev *dev)
5435{
5436	struct pci_dev *bridge = dev->bus->self;
5437
5438	dev->ignore_hotplug = 1;
5439	/* Propagate the "ignore hotplug" setting to the parent bridge. */
5440	if (bridge)
5441		bridge->ignore_hotplug = 1;
5442}
5443EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5444
5445resource_size_t __weak pcibios_default_alignment(void)
5446{
5447	return 0;
5448}
5449
5450#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
5451static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
5452static DEFINE_SPINLOCK(resource_alignment_lock);
5453
5454/**
5455 * pci_specified_resource_alignment - get resource alignment specified by user.
5456 * @dev: the PCI device to get
5457 * @resize: whether or not to change resources' size when reassigning alignment
5458 *
5459 * RETURNS: Resource alignment if it is specified.
5460 *          Zero if it is not specified.
5461 */
5462static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5463							bool *resize)
5464{
5465	int seg, bus, slot, func, align_order, count;
5466	unsigned short vendor, device, subsystem_vendor, subsystem_device;
5467	resource_size_t align = pcibios_default_alignment();
5468	char *p;
5469
5470	spin_lock(&resource_alignment_lock);
5471	p = resource_alignment_param;
5472	if (!*p && !align)
5473		goto out;
5474	if (pci_has_flag(PCI_PROBE_ONLY)) {
5475		align = 0;
5476		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5477		goto out;
5478	}
5479
5480	while (*p) {
5481		count = 0;
5482		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5483							p[count] == '@') {
5484			p += count + 1;
5485		} else {
5486			align_order = -1;
5487		}
5488		if (strncmp(p, "pci:", 4) == 0) {
5489			/* PCI vendor/device (subvendor/subdevice) IDs are specified */
5490			p += 4;
5491			if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5492				&vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5493				if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5494					printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5495						p);
5496					break;
5497				}
5498				subsystem_vendor = subsystem_device = 0;
5499			}
5500			p += count;
5501			if ((!vendor || (vendor == dev->vendor)) &&
5502				(!device || (device == dev->device)) &&
5503				(!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5504				(!subsystem_device || (subsystem_device == dev->subsystem_device))) {
5505				*resize = true;
5506				if (align_order == -1)
5507					align = PAGE_SIZE;
5508				else
5509					align = 1 << align_order;
5510				/* Found */
5511				break;
5512			}
5513		}
5514		else {
5515			if (sscanf(p, "%x:%x:%x.%x%n",
5516				&seg, &bus, &slot, &func, &count) != 4) {
5517				seg = 0;
5518				if (sscanf(p, "%x:%x.%x%n",
5519						&bus, &slot, &func, &count) != 3) {
5520					/* Invalid format */
5521					printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5522						p);
5523					break;
5524				}
5525			}
5526			p += count;
5527			if (seg == pci_domain_nr(dev->bus) &&
5528				bus == dev->bus->number &&
5529				slot == PCI_SLOT(dev->devfn) &&
5530				func == PCI_FUNC(dev->devfn)) {
5531				*resize = true;
5532				if (align_order == -1)
5533					align = PAGE_SIZE;
5534				else
					/* 1ULL: a plain "1 <<" overflows int for order >= 32 */
5535					align = 1ULL << align_order;
5536				/* Found */
5537				break;
5538			}
5539		}
5540		if (*p != ';' && *p != ',') {
5541			/* End of param or invalid format */
5542			break;
5543		}
5544		p++;
5545	}
5546out:
5547	spin_unlock(&resource_alignment_lock);
5548	return align;
5549}
5550
5551static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
5552					   resource_size_t align, bool resize)
5553{
5554	struct resource *r = &dev->resource[bar];
5555	resource_size_t size;
5556
5557	if (!(r->flags & IORESOURCE_MEM))
5558		return;
5559
5560	if (r->flags & IORESOURCE_PCI_FIXED) {
5561		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
5562			 bar, r, (unsigned long long)align);
5563		return;
5564	}
5565
5566	size = resource_size(r);
5567	if (size >= align)
5568		return;
5569
5570	/*
5571	 * Increase the alignment of the resource.  There are two ways we
5572	 * can do this:
5573	 *
5574	 * 1) Increase the size of the resource.  BARs are aligned on their
5575	 *    size, so when we reallocate space for this resource, we'll
5576	 *    allocate it with the larger alignment.  This also prevents
5577	 *    assignment of any other BARs inside the alignment region, so
5578	 *    if we're requesting page alignment, this means no other BARs
5579	 *    will share the page.
5580	 *
5581	 *    The disadvantage is that this makes the resource larger than
5582	 *    the hardware BAR, which may break drivers that compute things
5583	 *    based on the resource size, e.g., to find registers at a
5584	 *    fixed offset before the end of the BAR.
5585	 *
5586	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
5587	 *    set r->start to the desired alignment.  By itself this
5588	 *    doesn't prevent other BARs being put inside the alignment
5589	 *    region, but if we realign *every* resource of every device in
5590	 *    the system, none of them will share an alignment region.
5591	 *
5592	 * When the user has requested alignment for only some devices via
5593	 * the "pci=resource_alignment" argument, "resize" is true and we
5594	 * use the first method.  Otherwise we assume we're aligning all
5595	 * devices and we use the second.
5596	 */
5597
5598	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
5599		 bar, r, (unsigned long long)align);
5600
5601	if (resize) {
5602		r->start = 0;
5603		r->end = align - 1;
5604	} else {
5605		r->flags &= ~IORESOURCE_SIZEALIGN;
5606		r->flags |= IORESOURCE_STARTALIGN;
5607		r->start = align;
5608		r->end = r->start + size - 1;
5609	}
5610	r->flags |= IORESOURCE_UNSET;
5611}
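
/*
 * Worked example (illustrative): a 4 KiB BAR with a requested 1 MiB
 * alignment.  With "resize", the resource is grown to 1 MiB, so the
 * normal size-based alignment already yields a 1 MiB boundary.
 * Without "resize", the BAR stays 4 KiB, but IORESOURCE_STARTALIGN
 * with r->start = 1 MiB asks the allocator to place it on a 1 MiB
 * boundary while keeping its size.
 */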
5612
5613/*
5614 * This function disables memory decoding and releases memory resources
5615 * of the device specified by the kernel's boot parameter
5616 * 'pci=resource_alignment='.  It also rounds the resource size up to
5617 * the specified alignment.  Later on, the kernel will assign the
5618 * realigned memory resources back to the device.
5619 */
5620void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5621{
5622	int i;
5623	struct resource *r;
5624	resource_size_t align;
5625	u16 command;
5626	bool resize = false;
5627
5628	/*
5629	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
5630	 * 3.4.1.11.  Their resources are allocated from the space
5631	 * described by the VF BARx register in the PF's SR-IOV capability.
5632	 * We can't influence their alignment here.
5633	 */
5634	if (dev->is_virtfn)
5635		return;
5636
5637	/* Check whether this device was specified as a reassignment target */
5638	align = pci_specified_resource_alignment(dev, &resize);
5639	if (!align)
5640		return;
5641
5642	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5643	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
5644		pci_warn(dev, "Can't reassign resources to host bridge\n");
5645		return;
5646	}
5647
5648	pci_read_config_word(dev, PCI_COMMAND, &command);
5649	command &= ~PCI_COMMAND_MEMORY;
5650	pci_write_config_word(dev, PCI_COMMAND, command);
5651
5652	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
5653		pci_request_resource_alignment(dev, i, align, resize);
5654
5655	/*
5656	 * Need to disable the bridge's resource windows so the kernel
5657	 * can reassign new resource windows later on.
5659	 */
5660	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5661	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5662		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5663			r = &dev->resource[i];
5664			if (!(r->flags & IORESOURCE_MEM))
5665				continue;
5666			r->flags |= IORESOURCE_UNSET;
5667			r->end = resource_size(r) - 1;
5668			r->start = 0;
5669		}
5670		pci_disable_bridge_window(dev);
5671	}
5672}
5673
5674static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
5675{
5676	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5677		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5678	spin_lock(&resource_alignment_lock);
5679	strncpy(resource_alignment_param, buf, count);
5680	resource_alignment_param[count] = '\0';
5681	spin_unlock(&resource_alignment_lock);
5682	return count;
5683}
5684
5685static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
5686{
5687	size_t count;
5688	spin_lock(&resource_alignment_lock);
	/* scnprintf() returns the number of bytes actually written to buf */
5689	count = scnprintf(buf, size, "%s", resource_alignment_param);
5690	spin_unlock(&resource_alignment_lock);
5691	return count;
5692}
5693
5694static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5695{
5696	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5697}
5698
5699static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5700					const char *buf, size_t count)
5701{
5702	return pci_set_resource_alignment_param(buf, count);
5703}
5704
5705static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
5706					pci_resource_alignment_store);
5707
5708static int __init pci_resource_alignment_sysfs_init(void)
5709{
5710	return bus_create_file(&pci_bus_type,
5711					&bus_attr_resource_alignment);
5712}
5713late_initcall(pci_resource_alignment_sysfs_init);
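
/*
 * The attribute created above shows up as
 * /sys/bus/pci/resource_alignment and takes the same syntax as the
 * boot parameter, e.g. (illustrative device address):
 *
 *	# echo 20@0000:03:00.0 > /sys/bus/pci/resource_alignment
 *
 * Since the parameter is consulted when a device is added, a write
 * normally takes effect only for devices enumerated afterwards
 * (e.g. after a remove/rescan cycle).
 */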
5714
5715static void pci_no_domains(void)
5716{
5717#ifdef CONFIG_PCI_DOMAINS
5718	pci_domains_supported = 0;
5719#endif
5720}
5721
5722#ifdef CONFIG_PCI_DOMAINS
5723static atomic_t __domain_nr = ATOMIC_INIT(-1);
5724
5725int pci_get_new_domain_nr(void)
5726{
5727	return atomic_inc_return(&__domain_nr);
5728}
5729
5730#ifdef CONFIG_PCI_DOMAINS_GENERIC
5731static int of_pci_bus_find_domain_nr(struct device *parent)
5732{
5733	static int use_dt_domains = -1;
5734	int domain = -1;
5735
5736	if (parent)
5737		domain = of_get_pci_domain_nr(parent->of_node);
5738	/*
5739	 * Check DT domain and use_dt_domains values.
5740	 *
5741	 * If DT domain property is valid (domain >= 0) and
5742	 * use_dt_domains != 0, the DT assignment is valid since this means
5743	 * we have not previously allocated a domain number by using
5744	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
5745	 * 1, to indicate that we have just assigned a domain number from
5746	 * DT.
5747	 *
5748	 * If the DT domain property value is not valid (i.e., domain < 0)
5749	 * and we have not previously assigned a domain number from DT
5750	 * (use_dt_domains != 1), we should assign a domain number by
5751	 * calling pci_get_new_domain_nr() and update use_dt_domains to
5752	 * keep track of the method we are using to assign domain numbers
5753	 * (use_dt_domains = 0).
5757	 *
5758	 * All other combinations imply we have a platform that is trying
5759	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
5760	 * which is a recipe for domain mishandling and it is prevented by
5761	 * invalidating the domain value (domain = -1) and printing a
5762	 * corresponding error.
5763	 */
5764	if (domain >= 0 && use_dt_domains) {
5765		use_dt_domains = 1;
5766	} else if (domain < 0 && use_dt_domains != 1) {
5767		use_dt_domains = 0;
5768		domain = pci_get_new_domain_nr();
5769	} else {
		/* print one complete message instead of two partial printks */
5770		if (parent)
5771			pr_err("Node %pOF has inconsistent \"linux,pci-domain\" property in DT\n",
			       parent->of_node);
		else
5772			pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
5773		domain = -1;
5774	}
5775
5776	return domain;
5777}
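
/*
 * Illustrative device-tree fragment consumed by the code above (node
 * name and unit address are made up):
 *
 *	pcie@40000000 {
 *		...
 *		linux,pci-domain = <0>;
 *	};
 *
 * Per the consistency rule documented above, either every host bridge
 * node carries the property or none of them does.
 */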
5778
5779int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5780{
5781	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5782			       acpi_pci_bus_find_domain_nr(bus);
5783}
5784#endif
5785#endif
5786
5787/**
5788 * pci_ext_cfg_avail - can we access extended PCI config space?
5789 *
5790 * Returns 1 if we can access PCI extended config space (offsets
5791 * greater than 0xff). This is the default implementation. Architecture
5792 * implementations can override this.
5793 */
5794int __weak pci_ext_cfg_avail(void)
5795{
5796	return 1;
5797}
5798
5799void __weak pci_fixup_cardbus(struct pci_bus *bus)
5800{
5801}
5802EXPORT_SYMBOL(pci_fixup_cardbus);
5803
5804static int __init pci_setup(char *str)
5805{
5806	while (str) {
5807		char *k = strchr(str, ',');
5808		if (k)
5809			*k++ = 0;
5810		if (*str && (str = pcibios_setup(str)) && *str) {
5811			if (!strcmp(str, "nomsi")) {
5812				pci_no_msi();
5813			} else if (!strcmp(str, "noaer")) {
5814				pci_no_aer();
5815			} else if (!strncmp(str, "realloc=", 8)) {
5816				pci_realloc_get_opt(str + 8);
5817			} else if (!strncmp(str, "realloc", 7)) {
5818				pci_realloc_get_opt("on");
5819			} else if (!strcmp(str, "nodomains")) {
5820				pci_no_domains();
5821			} else if (!strncmp(str, "noari", 5)) {
5822				pcie_ari_disabled = true;
5823			} else if (!strncmp(str, "cbiosize=", 9)) {
5824				pci_cardbus_io_size = memparse(str + 9, &str);
5825			} else if (!strncmp(str, "cbmemsize=", 10)) {
5826				pci_cardbus_mem_size = memparse(str + 10, &str);
5827			} else if (!strncmp(str, "resource_alignment=", 19)) {
5828				pci_set_resource_alignment_param(str + 19,
5829							strlen(str + 19));
5830			} else if (!strncmp(str, "ecrc=", 5)) {
5831				pcie_ecrc_get_policy(str + 5);
5832			} else if (!strncmp(str, "hpiosize=", 9)) {
5833				pci_hotplug_io_size = memparse(str + 9, &str);
5834			} else if (!strncmp(str, "hpmemsize=", 10)) {
5835				pci_hotplug_mem_size = memparse(str + 10, &str);
5836			} else if (!strncmp(str, "hpbussize=", 10)) {
5837				pci_hotplug_bus_size =
5838					simple_strtoul(str + 10, &str, 0);
5839				if (pci_hotplug_bus_size > 0xff)
5840					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5841			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5842				pcie_bus_config = PCIE_BUS_TUNE_OFF;
5843			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
5844				pcie_bus_config = PCIE_BUS_SAFE;
5845			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
5846				pcie_bus_config = PCIE_BUS_PERFORMANCE;
5847			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5848				pcie_bus_config = PCIE_BUS_PEER2PEER;
5849			} else if (!strncmp(str, "pcie_scan_all", 13)) {
5850				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
5851			} else {
5852				printk(KERN_ERR "PCI: Unknown option `%s'\n",
5853						str);
5854			}
5855		}
5856		str = k;
5857	}
5858	return 0;
5859}
5860early_param("pci", pci_setup);
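
/*
 * Example kernel command line exercising several of the options parsed
 * above (illustrative values):
 *
 *	pci=nomsi,pcie_bus_safe,hpmemsize=8M,resource_alignment=20@0000:03:00.0
 */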