// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

DEFINE_MUTEX(pci_slot_mutex);

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

/* If set, the PCI config space of each device is printed during boot. */
bool pci_early_dump;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
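
/*
 * Usage sketch (illustrative, not part of the original file): find the
 * highest bus number reachable below a device's bus; "last" is a
 * made-up name and "pdev" is the caller's struct pci_dev.
 *
 *	unsigned char last = pci_bus_max_busnr(pdev->bus);
 */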

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
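
/*
 * Usage sketch (illustrative, not part of the original file): map BAR 0
 * during probe; "regs" is a made-up name.
 *
 *	void __iomem *regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs)
 *		return -ENOMEM;
 */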

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

/**
 * pci_dev_str_match_path - test if a path string matches a device
 * @dev: the PCI device to test
 * @path: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) formatted as a
 * path of device/function addresses matches a PCI device. The string must
 * be of the form:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *
 * A path for a device can be obtained using 'lspci -t'.  Using a path
 * is more robust against bus renumbering than using only a single bus,
 * device and function address.
 *
 * Returns 1 if the string matches the device, 0 if it does not and
 * a negative error code if it fails to parse the string.
 */
static int pci_dev_str_match_path(struct pci_dev *dev, const char *path,
				  const char **endptr)
{
	int ret;
	int seg, bus, slot, func;
	char *wpath, *p;
	char end;

	*endptr = strchrnul(path, ';');

	wpath = kmemdup_nul(path, *endptr - path, GFP_KERNEL);
	if (!wpath)
		return -ENOMEM;

	while (1) {
		p = strrchr(wpath, '/');
		if (!p)
			break;
		ret = sscanf(p, "/%x.%x%c", &slot, &func, &end);
		if (ret != 2) {
			ret = -EINVAL;
			goto free_and_exit;
		}

		if (dev->devfn != PCI_DEVFN(slot, func)) {
			ret = 0;
			goto free_and_exit;
		}

		/*
		 * Note: we don't need to get a reference to the upstream
		 * bridge because we hold a reference to the top level
		 * device which should hold a reference to the bridge,
		 * and so on.
		 */
		dev = pci_upstream_bridge(dev);
		if (!dev) {
			ret = 0;
			goto free_and_exit;
		}

		*p = 0;
	}

	ret = sscanf(wpath, "%x:%x:%x.%x%c", &seg, &bus, &slot,
		     &func, &end);
	if (ret != 4) {
		seg = 0;
		ret = sscanf(wpath, "%x:%x.%x%c", &bus, &slot, &func, &end);
		if (ret != 3) {
			ret = -EINVAL;
			goto free_and_exit;
		}
	}

	ret = (seg == pci_domain_nr(dev->bus) &&
	       bus == dev->bus->number &&
	       dev->devfn == PCI_DEVFN(slot, func));

free_and_exit:
	kfree(wpath);
	return ret;
}

/**
 * pci_dev_str_match - test if a string matches a device
 * @dev: the PCI device to test
 * @p: string to match the device against
 * @endptr: pointer to the string after the match
 *
 * Test if a string (typically from a kernel parameter) matches a specified
 * PCI device. The string may be of one of the following formats:
 *
 *   [<domain>:]<bus>:<device>.<func>[/<device>.<func>]*
 *   pci:<vendor>:<device>[:<subvendor>:<subdevice>]
 *
 294 * may change if new hardware is inserted, if motherboard firmware changes,
 295 * or due to changes caused in kernel parameters. If the domain is
 296 * left unspecified, it is taken to be 0.  In order to be robust against
 297 * bus renumbering issues, a path of PCI device/function numbers may be used
 298 * to address the specific device.  The path for a device can be determined
 299 * through the use of 'lspci -t'.
 300 *
 301 * The second format matches devices using IDs in the configuration
 302 * space which may match multiple devices in the system. A value of 0
 303 * for any field will match all devices. (Note: this differs from
 304 * in-kernel code that uses PCI_ANY_ID which is ~0; this is for
 305 * legacy reasons and convenience so users don't have to specify
 306 * FFFFFFFFs on the command line.)
 307 *
 308 * Returns 1 if the string matches the device, 0 if it does not and
 309 * a negative error code if the string cannot be parsed.
 310 */
 311static int pci_dev_str_match(struct pci_dev *dev, const char *p,
 312			     const char **endptr)
 313{
 314	int ret;
 315	int count;
 316	unsigned short vendor, device, subsystem_vendor, subsystem_device;
 317
 318	if (strncmp(p, "pci:", 4) == 0) {
 319		/* PCI vendor/device (subvendor/subdevice) IDs are specified */
 320		p += 4;
 321		ret = sscanf(p, "%hx:%hx:%hx:%hx%n", &vendor, &device,
 322			     &subsystem_vendor, &subsystem_device, &count);
 323		if (ret != 4) {
 324			ret = sscanf(p, "%hx:%hx%n", &vendor, &device, &count);
 325			if (ret != 2)
 326				return -EINVAL;
 327
 328			subsystem_vendor = 0;
 329			subsystem_device = 0;
 330		}
 331
 332		p += count;
 333
 334		if ((!vendor || vendor == dev->vendor) &&
 335		    (!device || device == dev->device) &&
 336		    (!subsystem_vendor ||
 337			    subsystem_vendor == dev->subsystem_vendor) &&
 338		    (!subsystem_device ||
 339			    subsystem_device == dev->subsystem_device))
 340			goto found;
 341	} else {
 342		/*
 343		 * PCI Bus, Device, Function IDs are specified
 344		 * (optionally, may include a path of devfns following it)
 345		 */
 346		ret = pci_dev_str_match_path(dev, p, &p);
 347		if (ret < 0)
 348			return ret;
 349		else if (ret)
 350			goto found;
 351	}
 352
 353	*endptr = p;
 354	return 0;
 355
 356found:
 357	*endptr = p;
 358	return 1;
 359}
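
/*
 * Example strings (illustrative, not part of the original file):
 * "0000:00:1f.6" names domain 0000, bus 00, device 1f, function 6;
 * "pci:8086:0" matches any device with vendor ID 8086.
 */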

static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap include:
 *
 *  %PCI_CAP_ID_PM           Power Management
 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD          Vital Product Data
 *  %PCI_CAP_ID_SLOTID       Slot Identification
 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX         PCI-X
 *  %PCI_CAP_ID_EXP          PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);
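
/*
 * Usage sketch (illustrative, not part of the original file): locate the
 * Power Management capability and read its control/status register.
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmcsr;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pmcsr);
 */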

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);
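
/*
 * Usage sketch (illustrative, not part of the original file): walk every
 * Vendor-Specific Extended Capability of a device.
 *
 *	int pos = 0;
 *
 *	while ((pos = pci_find_next_ext_capability(pdev, pos,
 *						   PCI_EXT_CAP_ID_VNDR)))
 *		pci_info(pdev, "VSEC at %#x\n", pos);
 */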

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap include:
 *
 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);
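
/*
 * Usage sketch (illustrative, not part of the original file): check for
 * the AER extended capability and read its uncorrectable error status.
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	u32 status;
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS,
 *				      &status);
 */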

static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
/**
 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: Hypertransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's Hypertransport capabilities
 * @dev: PCI device to query
 * @ht_cap: Hypertransport capability code
 *
 * Tell if a device supports a given Hypertransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a Hypertransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given
 *			      region
 * @dev: PCI device structure containing resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For a given resource region of a given device, return the resource region
 * of the parent bus that the given region is contained in.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}
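
/*
 * Usage sketch (illustrative, not part of the original file): wait for a
 * PCIe function's pending transactions to complete before a reset.
 *
 *	if (!pci_wait_for_pending(pdev,
 *				  pci_pcie_cap(pdev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		pci_err(pdev, "transactions still pending\n");
 */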

/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state  || !ops->get_state ||
	    !ops->choose_state  || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline void platform_pci_refresh_power_state(struct pci_dev *dev)
{
	if (pci_platform_pm && pci_platform_pm->refresh_state)
		pci_platform_pm->refresh_state(dev);
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
			pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

static inline bool platform_pci_bridge_d3(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->bridge_d3(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     a given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate current state:
	 * We can enter D0 from any state, but we can only go deeper
	 * into a sleep state if we're already in a low-power state.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through - force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see PCI PM 1.1
	 * 5.6.1 table 18
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state)
		pci_info_ratelimited(dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_refresh_power_state - Refresh the given device's power state data
 * @dev: Target PCI device.
 *
 * Ask the platform to refresh the device's power state information and invoke
 * pci_update_current_state() to update its current PCI power state.
 */
void pci_refresh_power_state(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_refresh_power_state(dev);

	pci_update_current_state(dev, dev->current_state);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by the corresponding bridge,
		 * because we have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay && !dev->imm_ready)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into the
			 * D0uninitialized state; resume the devices to
			 * give them a chance to suspend again.
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy */
	if (!ret && state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))

		/*
		 * If the device or the parent bridge do not support PCI
		 * PM, ignore the request if we're doing anything other
		 * than putting it into D0 (which would only happen on
		 * boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/*
	 * This device is quirked not to be put into D3, so don't put it in
	 * D3
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, we put it into D3hot the native
	 * way first, then into D3cold using the platform ops.
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);
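
/*
 * Usage sketch (illustrative, not part of the original file): a driver's
 * suspend path putting its device into D3hot.
 *
 *	pci_save_state(pdev);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */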

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	__pci_start_power_transition(dev, PCI_D0);
	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	   that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);
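
/*
 * Usage sketch (illustrative, not part of the original file): a legacy
 * suspend hook mapping the system sleep message to a device power state;
 * "mesg" is a made-up pm_message_t name.
 *
 *	pci_set_power_state(pdev, pci_choose_state(pdev, mesg));
 */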

#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL,  &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

static void pci_save_ltr_state(struct pci_dev *dev)
{
	int ltr;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return;

	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!ltr)
		return;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state) {
		pci_err(dev, "no suspend buffer for LTR; ASPM issues possible after resume\n");
		return;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, cap++);
	pci_read_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, cap++);
}

static void pci_restore_ltr_state(struct pci_dev *dev)
{
	struct pci_cap_saved_state *save_state;
	int ltr;
	u16 *cap;

	save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_LTR);
	ltr = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
	if (!save_state || !ltr)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_SNOOP_LAT, *cap++);
	pci_write_config_word(dev, ltr + PCI_LTR_MAX_NOSNOOP_LAT, *cap++);
}

/**
 * pci_save_state - save the PCI configuration space of a device before
 *		    suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	pci_save_ltr_state(dev);
	pci_save_dpc_state(dev);
	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry, bool force)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (!force && val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry,
					   bool force)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry, force);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0, false);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10, false);
		pci_restore_config_space_range(pdev, 0, 3, 0, false);
	} else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
		pci_restore_config_space_range(pdev, 12, 15, 0, false);

		/*
		 * Force rewriting of prefetch registers to avoid S3 resume
		 * issues on Intel PCI bridges that occur when these
		 * registers are not explicitly written.
		 */
		pci_restore_config_space_range(pdev, 9, 11, 0, true);
		pci_restore_config_space_range(pdev, 0, 8, 0, false);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0, false);
	}
}

static void pci_restore_rebar_state(struct pci_dev *pdev)
{
	unsigned int pos, nbars, i;
	u32 ctrl;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
		    PCI_REBAR_CTRL_NBAR_SHIFT;

	for (i = 0; i < nbars; i++, pos += 8) {
		struct resource *res;
		int bar_idx, size;

		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
		res = pdev->resource + bar_idx;
		size = ilog2(resource_size(res)) - 20;
		ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
		ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
		pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/*
	 * Restore max latencies (in the LTR capability) before enabling
	 * LTR itself (in the PCIe capability).
	 */
	pci_restore_ltr_state(dev);

	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);
	pci_restore_rebar_state(dev);
	pci_restore_dpc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);
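
/*
 * Usage sketch (illustrative, not part of the original file): the typical
 * suspend/resume pairing in a driver.
 *
 *	suspend:  pci_save_state(pdev);
 *		  pci_set_power_state(pdev, PCI_D3hot);
 *	resume:   pci_set_power_state(pdev, PCI_D0);
 *		  pci_restore_state(pdev);
 */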

struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
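
/*
 * Usage sketch (illustrative, not part of the original file): snapshot a
 * device's state and restore it later, e.g. around handing the device to
 * a guest.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */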

int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * NOTE: This function is a backend of pci_default_resume() and is not supposed
 * to be called by normal code; write a proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);
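
/*
 * Usage sketch (illustrative, not part of the original file): probe-time
 * enable with manual cleanup on the error path.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *	...
 *	pci_disable_device(pdev);
 */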

/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);
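
/*
 * Usage sketch (illustrative, not part of the original file; my_probe is
 * a made-up name): with the managed variant, the device is disabled
 * automatically when the driver detaches, so probe needs no unwind code
 * for it.
 *
 *	static int my_probe(struct pci_dev *pdev,
 *			    const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		return 0;
 *	}
 */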
1833
1834/**
1835 * pcim_pin_device - Pin managed PCI device
1836 * @pdev: PCI device to pin
1837 *
1838 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1839 * driver detach.  @pdev must have been enabled with
1840 * pcim_enable_device().
1841 */
1842void pcim_pin_device(struct pci_dev *pdev)
1843{
1844	struct pci_devres *dr;
1845
1846	dr = find_pci_dr(pdev);
1847	WARN_ON(!dr || !dr->enabled);
1848	if (dr)
1849		dr->pinned = 1;
1850}
1851EXPORT_SYMBOL(pcim_pin_device);
1852
1853/**
1854 * pcibios_add_device - provide arch specific hooks when adding device dev
1855 * @dev: the PCI device being added
1856 *
1857 * Permits the platform to provide architecture specific functionality when
1858 * devices are added. This is the default implementation. Architecture
1859 * implementations can override this.
1860 */
1861int __weak pcibios_add_device(struct pci_dev *dev)
1862{
1863	return 0;
1864}
1865
1866/**
1867 * pcibios_release_device - provide arch specific hooks when releasing
1868 *			    device dev
1869 * @dev: the PCI device being released
1870 *
1871 * Permits the platform to provide architecture specific functionality when
1872 * devices are released. This is the default implementation. Architecture
1873 * implementations can override this.
1874 */
1875void __weak pcibios_release_device(struct pci_dev *dev) {}
1876
1877/**
1878 * pcibios_disable_device - disable arch specific PCI resources for device dev
1879 * @dev: the PCI device to disable
1880 *
1881 * Disables architecture specific PCI resources for the device. This
1882 * is the default implementation. Architecture implementations can
1883 * override this.
1884 */
1885void __weak pcibios_disable_device(struct pci_dev *dev) {}
1886
1887/**
1888 * pcibios_penalize_isa_irq - penalize an ISA IRQ
1889 * @irq: ISA IRQ to penalize
1890 * @active: IRQ active or not
1891 *
1892 * Permits the platform to provide architecture-specific functionality when
1893 * penalizing ISA IRQs. This is the default implementation. Architecture
1894 * implementations can override this.
1895 */
1896void __weak pcibios_penalize_isa_irq(int irq, int active) {}
1897
1898static void do_pci_disable_device(struct pci_dev *dev)
1899{
1900	u16 pci_command;
1901
1902	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1903	if (pci_command & PCI_COMMAND_MASTER) {
1904		pci_command &= ~PCI_COMMAND_MASTER;
1905		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1906	}
1907
1908	pcibios_disable_device(dev);
1909}
1910
1911/**
1912 * pci_disable_enabled_device - Disable device without updating enable_cnt
1913 * @dev: PCI device to disable
1914 *
1915 * NOTE: This function is a backend of PCI power management routines and is
1916 * not supposed to be called by drivers.
1917 */
1918void pci_disable_enabled_device(struct pci_dev *dev)
1919{
1920	if (pci_is_enabled(dev))
1921		do_pci_disable_device(dev);
1922}
1923
1924/**
1925 * pci_disable_device - Disable PCI device after use
1926 * @dev: PCI device to be disabled
1927 *
1928 * Signal to the system that the PCI device is not in use
1929 * anymore.  This only involves disabling PCI bus-mastering, if active.
1930 *
1931 * Note we don't actually disable the device until all callers of
1932 * pci_enable_device() have called pci_disable_device().
1933 */
1934void pci_disable_device(struct pci_dev *dev)
1935{
1936	struct pci_devres *dr;
1937
1938	dr = find_pci_dr(dev);
1939	if (dr)
1940		dr->enabled = 0;
1941
1942	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
1943		      "disabling already-disabled device");
1944
1945	if (atomic_dec_return(&dev->enable_cnt) != 0)
1946		return;
1947
1948	do_pci_disable_device(dev);
1949
1950	dev->is_busmaster = 0;
1951}
1952EXPORT_SYMBOL(pci_disable_device);
1953
1954/**
1955 * pcibios_set_pcie_reset_state - set reset state for device dev
1956 * @dev: the PCIe device to reset
1957 * @state: Reset state to enter into
1958 *
1959 * Set the PCIe reset state for the device. This is the default
1960 * implementation. Architecture implementations can override this.
1961 */
1962int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
1963					enum pcie_reset_state state)
1964{
1965	return -EINVAL;
1966}
1967
1968/**
1969 * pci_set_pcie_reset_state - set reset state for device dev
1970 * @dev: the PCIe device to reset
1971 * @state: Reset state to enter into
1972 *
1973 * Sets the PCI reset state for the device.
1974 */
1975int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1976{
1977	return pcibios_set_pcie_reset_state(dev, state);
1978}
1979EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
1980
1981/**
1982 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
1983 * @dev: PCIe root port or event collector.
1984 */
1985void pcie_clear_root_pme_status(struct pci_dev *dev)
1986{
1987	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
1988}
1989
1990/**
1991 * pci_check_pme_status - Check if given device has generated PME.
1992 * @dev: Device to check.
1993 *
1994 * Check the PME status of the device and if set, clear it and clear PME enable
1995 * (if set).  Return 'true' if PME status and PME enable were both set or
1996 * 'false' otherwise.
1997 */
1998bool pci_check_pme_status(struct pci_dev *dev)
1999{
2000	int pmcsr_pos;
2001	u16 pmcsr;
2002	bool ret = false;
2003
2004	if (!dev->pm_cap)
2005		return false;
2006
2007	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
2008	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
2009	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
2010		return false;
2011
2012	/* Clear PME status. */
2013	pmcsr |= PCI_PM_CTRL_PME_STATUS;
2014	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
2015		/* Disable PME to avoid interrupt flood. */
2016		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2017		ret = true;
2018	}
2019
2020	pci_write_config_word(dev, pmcsr_pos, pmcsr);
2021
2022	return ret;
2023}
2024
2025/**
2026 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
2027 * @dev: Device to handle.
2028 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
2029 *
2030 * Check if @dev has generated PME and queue a resume request for it in that
2031 * case.
2032 */
2033static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
2034{
2035	if (pme_poll_reset && dev->pme_poll)
2036		dev->pme_poll = false;
2037
2038	if (pci_check_pme_status(dev)) {
2039		pci_wakeup_event(dev);
2040		pm_request_resume(&dev->dev);
2041	}
2042	return 0;
2043}
2044
2045/**
2046 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
2047 * @bus: Top bus of the subtree to walk.
2048 */
2049void pci_pme_wakeup_bus(struct pci_bus *bus)
2050{
2051	if (bus)
2052		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
2053}
2054
2055
2056/**
2057 * pci_pme_capable - check the capability of PCI device to generate PME#
2058 * @dev: PCI device to handle.
2059 * @state: PCI state from which device will issue PME#.
2060 */
2061bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
2062{
2063	if (!dev->pm_cap)
2064		return false;
2065
2066	return !!(dev->pme_support & (1 << state));
2067}
2068EXPORT_SYMBOL(pci_pme_capable);
2069
2070static void pci_pme_list_scan(struct work_struct *work)
2071{
2072	struct pci_pme_device *pme_dev, *n;
2073
2074	mutex_lock(&pci_pme_list_mutex);
2075	list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
2076		if (pme_dev->dev->pme_poll) {
2077			struct pci_dev *bridge;
2078
2079			bridge = pme_dev->dev->bus->self;
2080			/*
2081			 * If the bridge is in a low power state, the
2082			 * configuration space of subordinate devices
2083			 * may not be accessible
2084			 */
2085			if (bridge && bridge->current_state != PCI_D0)
2086				continue;
2087			/*
2088			 * If the device is in D3cold it should not be
2089			 * polled either.
2090			 */
2091			if (pme_dev->dev->current_state == PCI_D3cold)
2092				continue;
2093
2094			pci_pme_wakeup(pme_dev->dev, NULL);
2095		} else {
2096			list_del(&pme_dev->list);
2097			kfree(pme_dev);
2098		}
2099	}
2100	if (!list_empty(&pci_pme_list))
2101		queue_delayed_work(system_freezable_wq, &pci_pme_work,
2102				   msecs_to_jiffies(PME_TIMEOUT));
2103	mutex_unlock(&pci_pme_list_mutex);
2104}
2105
2106static void __pci_pme_active(struct pci_dev *dev, bool enable)
2107{
2108	u16 pmcsr;
2109
2110	if (!dev->pme_support)
2111		return;
2112
2113	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2114	/* Clear PME_Status by writing 1 to it and enable PME# */
2115	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
2116	if (!enable)
2117		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2118
2119	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2120}
2121
2122/**
2123 * pci_pme_restore - Restore PME configuration after config space restore.
2124 * @dev: PCI device to update.
2125 */
2126void pci_pme_restore(struct pci_dev *dev)
2127{
2128	u16 pmcsr;
2129
2130	if (!dev->pme_support)
2131		return;
2132
2133	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
2134	if (dev->wakeup_prepared) {
2135		pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2136		pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
2137	} else {
2138		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
2139		pmcsr |= PCI_PM_CTRL_PME_STATUS;
2140	}
2141	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2142}
2143
2144/**
2145 * pci_pme_active - enable or disable PCI device's PME# function
2146 * @dev: PCI device to handle.
2147 * @enable: 'true' to enable PME# generation; 'false' to disable it.
2148 *
2149 * The caller must verify that the device is capable of generating PME# before
2150 * calling this function with @enable equal to 'true'.
2151 */
2152void pci_pme_active(struct pci_dev *dev, bool enable)
2153{
2154	__pci_pme_active(dev, enable);
2155
2156	/*
2157	 * PCI (as opposed to PCIe) PME requires that the device have
2158	 * its PME# line hooked up correctly. Not all hardware vendors
2159	 * do this, so the PME never gets delivered and the device
2160	 * remains asleep. The easiest way around this is to
2161	 * periodically walk the list of suspended devices and check
2162	 * whether any have their PME flag set. The assumption is that
2163	 * we'll wake up often enough anyway that this won't be a huge
2164	 * hit, and the power savings from the devices will still be a
2165	 * win.
2166	 *
2167	 * Although PCIe uses an in-band PME message instead of the PME#
2168	 * line to report PME, PME does not work for some PCIe devices in
2169	 * reality.  For example, there are devices that set their PME
2170	 * status bits, but don't really bother to send a PME message;
2171	 * there are PCI Express Root Ports that don't bother to
2172	 * trigger interrupts when they receive PME messages from the
2173	 * devices below.  So PME poll is used for PCIe devices too.
2174	 */
2175
2176	if (dev->pme_poll) {
2177		struct pci_pme_device *pme_dev;
2178		if (enable) {
2179			pme_dev = kmalloc(sizeof(struct pci_pme_device),
2180					  GFP_KERNEL);
2181			if (!pme_dev) {
2182				pci_warn(dev, "can't enable PME#\n");
2183				return;
2184			}
2185			pme_dev->dev = dev;
2186			mutex_lock(&pci_pme_list_mutex);
2187			list_add(&pme_dev->list, &pci_pme_list);
2188			if (list_is_singular(&pci_pme_list))
2189				queue_delayed_work(system_freezable_wq,
2190						   &pci_pme_work,
2191						   msecs_to_jiffies(PME_TIMEOUT));
2192			mutex_unlock(&pci_pme_list_mutex);
2193		} else {
2194			mutex_lock(&pci_pme_list_mutex);
2195			list_for_each_entry(pme_dev, &pci_pme_list, list) {
2196				if (pme_dev->dev == dev) {
2197					list_del(&pme_dev->list);
2198					kfree(pme_dev);
2199					break;
2200				}
2201			}
2202			mutex_unlock(&pci_pme_list_mutex);
2203		}
2204	}
2205
2206	pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
2207}
2208EXPORT_SYMBOL(pci_pme_active);
2209
2210/**
2211 * __pci_enable_wake - enable PCI device as wakeup event source
2212 * @dev: PCI device affected
2213 * @state: PCI state from which device will issue wakeup events
2214 * @enable: True to enable event generation; false to disable
2215 *
2216 * This enables the device as a wakeup event source, or disables it.
2217 * When such events involve platform-specific hooks, those hooks are
2218 * called automatically by this routine.
2219 *
2220 * Devices with legacy power management (no standard PCI PM capabilities)
2221 * always require such platform hooks.
2222 *
2223 * RETURN VALUE:
2224 * 0 is returned on success
2225 * -EINVAL is returned if the device is not supposed to wake up the system
2226 * A platform-dependent error code is returned if both the platform and
2227 * the native mechanism fail to enable the generation of wake-up events
2228 */
2229static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
2230{
2231	int ret = 0;
2232
2233	/*
2234	 * Bridges that are not power-manageable directly only signal
2235	 * wakeup on behalf of subordinate devices which is set up
2236	 * elsewhere, so skip them. However, bridges that are
2237	 * power-manageable may signal wakeup for themselves (for example,
2238	 * on a hotplug event) and they need to be covered here.
2239	 */
2240	if (!pci_power_manageable(dev))
2241		return 0;
2242
2243	/* Don't do the same thing twice in a row for one device. */
2244	if (!!enable == !!dev->wakeup_prepared)
2245		return 0;
2246
2247	/*
2248	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
2249	 * Anderson we should be doing PME# wake enable followed by ACPI wake
2250	 * enable.  To disable wake-up we call the platform first, for symmetry.
2251	 */
2252
2253	if (enable) {
2254		int error;
2255
2256		if (pci_pme_capable(dev, state))
2257			pci_pme_active(dev, true);
2258		else
2259			ret = 1;
2260		error = platform_pci_set_wakeup(dev, true);
2261		if (ret)
2262			ret = error;
2263		if (!ret)
2264			dev->wakeup_prepared = true;
2265	} else {
2266		platform_pci_set_wakeup(dev, false);
2267		pci_pme_active(dev, false);
2268		dev->wakeup_prepared = false;
2269	}
2270
2271	return ret;
2272}
2273
2274/**
2275 * pci_enable_wake - change wakeup settings for a PCI device
2276 * @pci_dev: Target device
2277 * @state: PCI state from which device will issue wakeup events
2278 * @enable: Whether or not to enable event generation
2279 *
2280 * If @enable is set, check device_may_wakeup() for the device before calling
2281 * __pci_enable_wake() for it.
2282 */
2283int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
2284{
2285	if (enable && !device_may_wakeup(&pci_dev->dev))
2286		return -EINVAL;
2287
2288	return __pci_enable_wake(pci_dev, state, enable);
2289}
2290EXPORT_SYMBOL(pci_enable_wake);
2291
2292/**
2293 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2294 * @dev: PCI device to prepare
2295 * @enable: True to enable wake-up event generation; false to disable
2296 *
2297 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2298 * and this function allows them to set that up cleanly - pci_enable_wake()
2299 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2300 * ordering constraints.
2301 *
2302 * This function only returns error code if the device is not allowed to wake
2303 * up the system from sleep or it is not capable of generating PME# from both
2304 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
2305 */
2306int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2307{
2308	return pci_pme_capable(dev, PCI_D3cold) ?
2309			pci_enable_wake(dev, PCI_D3cold, enable) :
2310			pci_enable_wake(dev, PCI_D3hot, enable);
2311}
2312EXPORT_SYMBOL(pci_wake_from_d3);
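/*
 * Usage sketch (hypothetical legacy .suspend callback): arm the device to
 * wake the system before putting it into D3, as described above.
 */
static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
{
	pci_save_state(pdev);
	/* Enables PME# from the deepest D3 state the device supports. */
	pci_wake_from_d3(pdev, true);
	return pci_set_power_state(pdev, PCI_D3hot);
}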
2313
2314/**
2315 * pci_target_state - find an appropriate low power state for a given PCI dev
2316 * @dev: PCI device
2317 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
2318 *
2319 * Use underlying platform code to find a supported low power state for @dev.
2320 * If the platform can't manage @dev, return the deepest state from which it
2321 * can generate wake events, based on any available PME info.
2322 */
2323static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
2324{
2325	pci_power_t target_state = PCI_D3hot;
2326
2327	if (platform_pci_power_manageable(dev)) {
2328		/*
2329		 * Call the platform to find the target state for the device.
2330		 */
2331		pci_power_t state = platform_pci_choose_state(dev);
2332
2333		switch (state) {
2334		case PCI_POWER_ERROR:
2335		case PCI_UNKNOWN:
2336			break;
2337		case PCI_D1:
2338		case PCI_D2:
2339			if (pci_no_d1d2(dev))
2340				break;
2341			/* else, fall through */
2342		default:
2343			target_state = state;
2344		}
2345
2346		return target_state;
2347	}
2348
2349	if (!dev->pm_cap)
2350		target_state = PCI_D0;
2351
2352	/*
2353	 * If the device is in D3cold even though it's not power-manageable by
2354	 * the platform, it may have been powered down by non-standard means.
2355	 * Best to let it slumber.
2356	 */
2357	if (dev->current_state == PCI_D3cold)
2358		target_state = PCI_D3cold;
2359
2360	if (wakeup) {
2361		/*
2362		 * Find the deepest state from which the device can generate
2363		 * PME#.
2364		 */
2365		if (dev->pme_support) {
2366			while (target_state
2367			      && !(dev->pme_support & (1 << target_state)))
2368				target_state--;
2369		}
2370	}
2371
2372	return target_state;
2373}
2374
2375/**
2376 * pci_prepare_to_sleep - prepare PCI device for system-wide transition
2377 *			  into a sleep state
2378 * @dev: Device to handle.
2379 *
2380 * Choose the power state appropriate for the device depending on whether
2381 * it can wake up the system and/or is power manageable by the platform
2382 * (PCI_D3hot is the default) and put the device into that state.
2383 */
2384int pci_prepare_to_sleep(struct pci_dev *dev)
2385{
2386	bool wakeup = device_may_wakeup(&dev->dev);
2387	pci_power_t target_state = pci_target_state(dev, wakeup);
2388	int error;
2389
2390	if (target_state == PCI_POWER_ERROR)
2391		return -EIO;
2392
2393	pci_enable_wake(dev, target_state, wakeup);
2394
2395	error = pci_set_power_state(dev, target_state);
2396
2397	if (error)
2398		pci_enable_wake(dev, target_state, false);
2399
2400	return error;
2401}
2402EXPORT_SYMBOL(pci_prepare_to_sleep);
2403
2404/**
2405 * pci_back_from_sleep - turn PCI device on during system-wide transition
2406 *			 into working state
2407 * @dev: Device to handle.
2408 *
2409 * Disable device's system wake-up capability and put it into D0.
2410 */
2411int pci_back_from_sleep(struct pci_dev *dev)
2412{
2413	pci_enable_wake(dev, PCI_D0, false);
2414	return pci_set_power_state(dev, PCI_D0);
2415}
2416EXPORT_SYMBOL(pci_back_from_sleep);
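/*
 * Illustrative pairing (normally done by the PCI core's own PM code, shown
 * here as hypothetical callbacks): pci_prepare_to_sleep() on the way down
 * and pci_back_from_sleep() on the way back up.
 */
static int foo_suspend_noirq(struct pci_dev *pdev)
{
	/* Picks a target state, arms wake-up, and enters that state. */
	return pci_prepare_to_sleep(pdev);
}

static int foo_resume_noirq(struct pci_dev *pdev)
{
	/* Disarms wake-up and returns the device to D0. */
	return pci_back_from_sleep(pdev);
}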
2417
2418/**
2419 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2420 * @dev: PCI device being suspended.
2421 *
2422 * Prepare @dev to generate wake-up events at run time and put it into a low
2423 * power state.
2424 */
2425int pci_finish_runtime_suspend(struct pci_dev *dev)
2426{
2427	pci_power_t target_state;
2428	int error;
2429
2430	target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
2431	if (target_state == PCI_POWER_ERROR)
2432		return -EIO;
2433
2434	dev->runtime_d3cold = target_state == PCI_D3cold;
2435
2436	__pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
2437
2438	error = pci_set_power_state(dev, target_state);
2439
2440	if (error) {
2441		pci_enable_wake(dev, target_state, false);
2442		dev->runtime_d3cold = false;
2443	}
2444
2445	return error;
2446}
2447
2448/**
2449 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2450 * @dev: Device to check.
2451 *
2452 * Return true if the device itself is capable of generating wake-up events
2453 * (through the platform or using the native PCIe PME) or if the device supports
2454 * PME and one of its upstream bridges can generate wake-up events.
2455 */
2456bool pci_dev_run_wake(struct pci_dev *dev)
2457{
2458	struct pci_bus *bus = dev->bus;
2459
2460	if (!dev->pme_support)
2461		return false;
2462
2463	/* PME-capable in principle, but not from the target power state */
2464	if (!pci_pme_capable(dev, pci_target_state(dev, true)))
2465		return false;
2466
2467	if (device_can_wakeup(&dev->dev))
2468		return true;
2469
2470	while (bus->parent) {
2471		struct pci_dev *bridge = bus->self;
2472
2473		if (device_can_wakeup(&bridge->dev))
2474			return true;
2475
2476		bus = bus->parent;
2477	}
2478
2479	/* We have reached the root bus. */
2480	if (bus->bridge)
2481		return device_can_wakeup(bus->bridge);
2482
2483	return false;
2484}
2485EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2486
2487/**
2488 * pci_dev_need_resume - Check if it is necessary to resume the device.
2489 * @pci_dev: Device to check.
2490 *
2491 * Return 'true' if the device is not runtime-suspended, if it has to be
2492 * reconfigured due to a difference in wakeup settings between system and
2493 * runtime suspend, or if its current power state is not suitable for the
2494 * upcoming (system-wide) transition.
2495 */
2496bool pci_dev_need_resume(struct pci_dev *pci_dev)
2497{
2498	struct device *dev = &pci_dev->dev;
2499	pci_power_t target_state;
2500
2501	if (!pm_runtime_suspended(dev) || platform_pci_need_resume(pci_dev))
2502		return true;
2503
2504	target_state = pci_target_state(pci_dev, device_may_wakeup(dev));
2505
2506	/*
2507	 * If the earlier platform check has not triggered, D3cold is just power
2508	 * removal on top of D3hot, so no need to resume the device in that
2509	 * case.
2510	 */
2511	return target_state != pci_dev->current_state &&
2512		target_state != PCI_D3cold &&
2513		pci_dev->current_state != PCI_D3hot;
2514}
2515
2516/**
2517 * pci_dev_adjust_pme - Adjust PME setting for a suspended device.
2518 * @pci_dev: Device to check.
2519 *
2520 * If the device is suspended and it is not configured for system wakeup,
2521 * disable PME for it to prevent it from waking up the system unnecessarily.
2522 *
2523 * Note that if the device's power state is D3cold and the platform check in
2524 * pci_dev_need_resume() has not triggered, the device's configuration need not
2525 * be changed.
2526 */
2527void pci_dev_adjust_pme(struct pci_dev *pci_dev)
2528{
2529	struct device *dev = &pci_dev->dev;
2530
2531	spin_lock_irq(&dev->power.lock);
2532
2533	if (pm_runtime_suspended(dev) && !device_may_wakeup(dev) &&
2534	    pci_dev->current_state < PCI_D3cold)
2535		__pci_pme_active(pci_dev, false);
2536
2537	spin_unlock_irq(&dev->power.lock);
2538}
2539
2540/**
2541 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2542 * @pci_dev: Device to handle.
2543 *
2544 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2545 * it might have been disabled during the prepare phase of system suspend if
2546 * the device was not configured for system wakeup.
2547 */
2548void pci_dev_complete_resume(struct pci_dev *pci_dev)
2549{
2550	struct device *dev = &pci_dev->dev;
2551
2552	if (!pci_dev_run_wake(pci_dev))
2553		return;
2554
2555	spin_lock_irq(&dev->power.lock);
2556
2557	if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2558		__pci_pme_active(pci_dev, true);
2559
2560	spin_unlock_irq(&dev->power.lock);
2561}
2562
2563void pci_config_pm_runtime_get(struct pci_dev *pdev)
2564{
2565	struct device *dev = &pdev->dev;
2566	struct device *parent = dev->parent;
2567
2568	if (parent)
2569		pm_runtime_get_sync(parent);
2570	pm_runtime_get_noresume(dev);
2571	/*
2572	 * pdev->current_state is set to PCI_D3cold during suspending,
2573	 * so wait until suspending completes
2574	 */
2575	pm_runtime_barrier(dev);
2576	/*
2577	 * Only need to resume devices in D3cold, because config
2578	 * registers are still accessible for devices suspended but
2579	 * not in D3cold.
2580	 */
2581	if (pdev->current_state == PCI_D3cold)
2582		pm_runtime_resume(dev);
2583}
2584
2585void pci_config_pm_runtime_put(struct pci_dev *pdev)
2586{
2587	struct device *dev = &pdev->dev;
2588	struct device *parent = dev->parent;
2589
2590	pm_runtime_put(dev);
2591	if (parent)
2592		pm_runtime_put_sync(parent);
2593}
2594
2595static const struct dmi_system_id bridge_d3_blacklist[] = {
2596#ifdef CONFIG_X86
2597	{
2598		/*
2599		 * Gigabyte X299 root port is not marked as hotplug capable,
2600		 * which allows Linux to power manage it.  However, this
2601		 * confuses the BIOS SMI handler so don't power manage root
2602		 * ports on that system.
2603		 */
2604		.ident = "X299 DESIGNARE EX-CF",
2605		.matches = {
2606			DMI_MATCH(DMI_BOARD_VENDOR, "Gigabyte Technology Co., Ltd."),
2607			DMI_MATCH(DMI_BOARD_NAME, "X299 DESIGNARE EX-CF"),
2608		},
2609	},
2610#endif
2611	{ }
2612};
2613
2614/**
2615 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2616 * @bridge: Bridge to check
2617 *
2618 * This function checks if it is possible to move the bridge to D3.
2619 * Currently we only allow D3 for recent enough PCIe ports and Thunderbolt.
2620 */
2621bool pci_bridge_d3_possible(struct pci_dev *bridge)
2622{
2623	if (!pci_is_pcie(bridge))
2624		return false;
2625
2626	switch (pci_pcie_type(bridge)) {
2627	case PCI_EXP_TYPE_ROOT_PORT:
2628	case PCI_EXP_TYPE_UPSTREAM:
2629	case PCI_EXP_TYPE_DOWNSTREAM:
2630		if (pci_bridge_d3_disable)
2631			return false;
2632
2633		/*
2634		 * Hotplug ports handled by firmware in System Management Mode
2635		 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
2636		 */
2637		if (bridge->is_hotplug_bridge && !pciehp_is_native(bridge))
2638			return false;
2639
2640		if (pci_bridge_d3_force)
2641			return true;
2642
2643		/* Even the oldest 2010 Thunderbolt controller supports D3. */
2644		if (bridge->is_thunderbolt)
2645			return true;
2646
2647		/* Platform might know better if the bridge supports D3 */
2648		if (platform_pci_bridge_d3(bridge))
2649			return true;
2650
2651		/*
2652		 * Hotplug ports handled natively by the OS were not validated
2653		 * by vendors for runtime D3 at least until 2018 because there
2654		 * was no OS support.
2655		 */
2656		if (bridge->is_hotplug_bridge)
2657			return false;
2658
2659		if (dmi_check_system(bridge_d3_blacklist))
2660			return false;
2661
2662		/*
2663		 * It should be safe to put PCIe ports from 2015 or newer
2664		 * to D3.
2665		 */
2666		if (dmi_get_bios_year() >= 2015)
2667			return true;
2668		break;
2669	}
2670
2671	return false;
2672}
2673
2674static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2675{
2676	bool *d3cold_ok = data;
2677
2678	if (/* The device needs to be allowed to go D3cold ... */
2679	    dev->no_d3cold || !dev->d3cold_allowed ||
2680
2681	    /* ... and if it is wakeup capable to do so from D3cold. */
2682	    (device_may_wakeup(&dev->dev) &&
2683	     !pci_pme_capable(dev, PCI_D3cold)) ||
2684
2685	    /* If it is a bridge it must be allowed to go to D3. */
2686	    !pci_power_manageable(dev))
2687
2688		*d3cold_ok = false;
2689
2690	return !*d3cold_ok;
2691}
2692
2693/**
2694 * pci_bridge_d3_update - Update bridge D3 capabilities
2695 * @dev: PCI device which is changed
2696 *
2697 * Update upstream bridge PM capabilities accordingly depending on if the
2698 * device PM configuration was changed or the device is being removed.  The
2699 * change is also propagated upstream.
2700 */
2701void pci_bridge_d3_update(struct pci_dev *dev)
2702{
2703	bool remove = !device_is_registered(&dev->dev);
2704	struct pci_dev *bridge;
2705	bool d3cold_ok = true;
2706
2707	bridge = pci_upstream_bridge(dev);
2708	if (!bridge || !pci_bridge_d3_possible(bridge))
2709		return;
2710
2711	/*
2712	 * If D3 is currently allowed for the bridge, removing one of its
2713	 * children won't change that.
2714	 */
2715	if (remove && bridge->bridge_d3)
2716		return;
2717
2718	/*
2719	 * If D3 is currently allowed for the bridge and a child is added or
2720	 * changed, disallowance of D3 can only be caused by that child, so
2721	 * we only need to check that single device, not any of its siblings.
2722	 *
2723	 * If D3 is currently not allowed for the bridge, checking the device
2724	 * first may allow us to skip checking its siblings.
2725	 */
2726	if (!remove)
2727		pci_dev_check_d3cold(dev, &d3cold_ok);
2728
2729	/*
2730	 * If D3 is currently not allowed for the bridge, this may be caused
2731	 * either by the device being changed/removed or any of its siblings,
2732	 * so we need to go through all children to find out if one of them
2733	 * continues to block D3.
2734	 */
2735	if (d3cold_ok && !bridge->bridge_d3)
2736		pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2737			     &d3cold_ok);
2738
2739	if (bridge->bridge_d3 != d3cold_ok) {
2740		bridge->bridge_d3 = d3cold_ok;
2741		/* Propagate change to upstream bridges */
2742		pci_bridge_d3_update(bridge);
2743	}
2744}
2745
2746/**
2747 * pci_d3cold_enable - Enable D3cold for device
2748 * @dev: PCI device to handle
2749 *
2750 * This function can be used in drivers to enable D3cold from the device
2751 * they handle.  It also updates upstream PCI bridge PM capabilities
2752 * accordingly.
2753 */
2754void pci_d3cold_enable(struct pci_dev *dev)
2755{
2756	if (dev->no_d3cold) {
2757		dev->no_d3cold = false;
2758		pci_bridge_d3_update(dev);
2759	}
2760}
2761EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2762
2763/**
2764 * pci_d3cold_disable - Disable D3cold for device
2765 * @dev: PCI device to handle
2766 *
2767 * This function can be used in drivers to disable D3cold from the device
2768 * they handle.  It also updates upstream PCI bridge PM capabilities
2769 * accordingly.
2770 */
2771void pci_d3cold_disable(struct pci_dev *dev)
2772{
2773	if (!dev->no_d3cold) {
2774		dev->no_d3cold = true;
2775		pci_bridge_d3_update(dev);
2776	}
2777}
2778EXPORT_SYMBOL_GPL(pci_d3cold_disable);
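/*
 * Usage sketch (hypothetical "foo" driver): a device that loses state it
 * cannot restore across D3cold can opt out at probe time.
 */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* Also propagates the change to upstream bridge PM capabilities. */
	pci_d3cold_disable(pdev);
	return 0;
}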
2779
2780/**
2781 * pci_pm_init - Initialize PM functions of given PCI device
2782 * @dev: PCI device to handle.
2783 */
2784void pci_pm_init(struct pci_dev *dev)
2785{
2786	int pm;
2787	u16 status;
2788	u16 pmc;
2789
2790	pm_runtime_forbid(&dev->dev);
2791	pm_runtime_set_active(&dev->dev);
2792	pm_runtime_enable(&dev->dev);
2793	device_enable_async_suspend(&dev->dev);
2794	dev->wakeup_prepared = false;
2795
2796	dev->pm_cap = 0;
2797	dev->pme_support = 0;
2798
2799	/* find PCI PM capability in list */
2800	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2801	if (!pm)
2802		return;
2803	/* Check device's ability to generate PME# */
2804	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
2805
2806	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
2807		pci_err(dev, "unsupported PM cap regs version (%u)\n",
2808			pmc & PCI_PM_CAP_VER_MASK);
2809		return;
2810	}
2811
2812	dev->pm_cap = pm;
2813	dev->d3_delay = PCI_PM_D3_WAIT;
2814	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
2815	dev->bridge_d3 = pci_bridge_d3_possible(dev);
2816	dev->d3cold_allowed = true;
2817
2818	dev->d1_support = false;
2819	dev->d2_support = false;
2820	if (!pci_no_d1d2(dev)) {
2821		if (pmc & PCI_PM_CAP_D1)
2822			dev->d1_support = true;
2823		if (pmc & PCI_PM_CAP_D2)
2824			dev->d2_support = true;
2825
2826		if (dev->d1_support || dev->d2_support)
2827			pci_info(dev, "supports%s%s\n",
2828				   dev->d1_support ? " D1" : "",
2829				   dev->d2_support ? " D2" : "");
2830	}
2831
2832	pmc &= PCI_PM_CAP_PME_MASK;
2833	if (pmc) {
2834		pci_info(dev, "PME# supported from%s%s%s%s%s\n",
2835			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2836			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2837			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2838			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2839			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
2840		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
2841		dev->pme_poll = true;
2842		/*
2843		 * Make device's PM flags reflect the wake-up capability, but
2844		 * let the user space enable it to wake up the system as needed.
2845		 */
2846		device_set_wakeup_capable(&dev->dev, true);
2847		/* Disable the PME# generation functionality */
2848		pci_pme_active(dev, false);
2849	}
2850
2851	pci_read_config_word(dev, PCI_STATUS, &status);
2852	if (status & PCI_STATUS_IMM_READY)
2853		dev->imm_ready = 1;
2854}
2855
2856static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2857{
2858	unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
2859
2860	switch (prop) {
2861	case PCI_EA_P_MEM:
2862	case PCI_EA_P_VF_MEM:
2863		flags |= IORESOURCE_MEM;
2864		break;
2865	case PCI_EA_P_MEM_PREFETCH:
2866	case PCI_EA_P_VF_MEM_PREFETCH:
2867		flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2868		break;
2869	case PCI_EA_P_IO:
2870		flags |= IORESOURCE_IO;
2871		break;
2872	default:
2873		return 0;
2874	}
2875
2876	return flags;
2877}
2878
2879static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2880					    u8 prop)
2881{
2882	if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2883		return &dev->resource[bei];
2884#ifdef CONFIG_PCI_IOV
2885	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2886		 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2887		return &dev->resource[PCI_IOV_RESOURCES +
2888				      bei - PCI_EA_BEI_VF_BAR0];
2889#endif
2890	else if (bei == PCI_EA_BEI_ROM)
2891		return &dev->resource[PCI_ROM_RESOURCE];
2892	else
2893		return NULL;
2894}
2895
2896/* Read an Enhanced Allocation (EA) entry */
2897static int pci_ea_read(struct pci_dev *dev, int offset)
2898{
2899	struct resource *res;
2900	int ent_size, ent_offset = offset;
2901	resource_size_t start, end;
2902	unsigned long flags;
2903	u32 dw0, bei, base, max_offset;
2904	u8 prop;
2905	bool support_64 = (sizeof(resource_size_t) >= 8);
2906
2907	pci_read_config_dword(dev, ent_offset, &dw0);
2908	ent_offset += 4;
2909
2910	/* Entry size field indicates DWORDs after 1st */
2911	ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2912
2913	if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2914		goto out;
2915
2916	bei = (dw0 & PCI_EA_BEI) >> 4;
2917	prop = (dw0 & PCI_EA_PP) >> 8;
2918
2919	/*
2920	 * If the Property is in the reserved range, try the Secondary
2921	 * Property instead.
2922	 */
2923	if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
2924		prop = (dw0 & PCI_EA_SP) >> 16;
2925	if (prop > PCI_EA_P_BRIDGE_IO)
2926		goto out;
2927
2928	res = pci_ea_get_resource(dev, bei, prop);
2929	if (!res) {
2930		pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
2931		goto out;
2932	}
2933
2934	flags = pci_ea_flags(dev, prop);
2935	if (!flags) {
2936		pci_err(dev, "Unsupported EA properties: %#x\n", prop);
2937		goto out;
2938	}
2939
2940	/* Read Base */
2941	pci_read_config_dword(dev, ent_offset, &base);
2942	start = (base & PCI_EA_FIELD_MASK);
2943	ent_offset += 4;
2944
2945	/* Read MaxOffset */
2946	pci_read_config_dword(dev, ent_offset, &max_offset);
2947	ent_offset += 4;
2948
2949	/* Read Base MSBs (if 64-bit entry) */
2950	if (base & PCI_EA_IS_64) {
2951		u32 base_upper;
2952
2953		pci_read_config_dword(dev, ent_offset, &base_upper);
2954		ent_offset += 4;
2955
2956		flags |= IORESOURCE_MEM_64;
2957
2958		/* entry starts above 32-bit boundary, can't use */
2959		if (!support_64 && base_upper)
2960			goto out;
2961
2962		if (support_64)
2963			start |= ((u64)base_upper << 32);
2964	}
2965
2966	end = start + (max_offset | 0x03);
2967
2968	/* Read MaxOffset MSBs (if 64-bit entry) */
2969	if (max_offset & PCI_EA_IS_64) {
2970		u32 max_offset_upper;
2971
2972		pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2973		ent_offset += 4;
2974
2975		flags |= IORESOURCE_MEM_64;
2976
2977		/* entry too big, can't use */
2978		if (!support_64 && max_offset_upper)
2979			goto out;
2980
2981		if (support_64)
2982			end += ((u64)max_offset_upper << 32);
2983	}
2984
2985	if (end < start) {
2986		pci_err(dev, "EA Entry crosses address boundary\n");
2987		goto out;
2988	}
2989
2990	if (ent_size != ent_offset - offset) {
2991		pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
2992			ent_size, ent_offset - offset);
2993		goto out;
2994	}
2995
2996	res->name = pci_name(dev);
2997	res->start = start;
2998	res->end = end;
2999	res->flags = flags;
3000
3001	if (bei <= PCI_EA_BEI_BAR5)
3002		pci_info(dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3003			   bei, res, prop);
3004	else if (bei == PCI_EA_BEI_ROM)
3005		pci_info(dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
3006			   res, prop);
3007	else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
3008		pci_info(dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
3009			   bei - PCI_EA_BEI_VF_BAR0, res, prop);
3010	else
3011		pci_info(dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
3012			   bei, res, prop);
3013
3014out:
3015	return offset + ent_size;
3016}
3017
3018/* Enhanced Allocation Initialization */
3019void pci_ea_init(struct pci_dev *dev)
3020{
3021	int ea;
3022	u8 num_ent;
3023	int offset;
3024	int i;
3025
3026	/* find PCI EA capability in list */
3027	ea = pci_find_capability(dev, PCI_CAP_ID_EA);
3028	if (!ea)
3029		return;
3030
3031	/* determine the number of entries */
3032	pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
3033					&num_ent);
3034	num_ent &= PCI_EA_NUM_ENT_MASK;
3035
3036	offset = ea + PCI_EA_FIRST_ENT;
3037
3038	/* Skip DWORD 2 for type 1 functions */
3039	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
3040		offset += 4;
3041
3042	/* parse each EA entry */
3043	for (i = 0; i < num_ent; ++i)
3044		offset = pci_ea_read(dev, offset);
3045}
3046
3047static void pci_add_saved_cap(struct pci_dev *pci_dev,
3048	struct pci_cap_saved_state *new_cap)
3049{
3050	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
3051}
3052
3053/**
3054 * _pci_add_cap_save_buffer - allocate buffer for saving given
3055 *			      capability registers
3056 * @dev: the PCI device
3057 * @cap: the capability to allocate the buffer for
3058 * @extended: Standard or Extended capability ID
3059 * @size: requested size of the buffer
3060 */
3061static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
3062				    bool extended, unsigned int size)
3063{
3064	int pos;
3065	struct pci_cap_saved_state *save_state;
3066
3067	if (extended)
3068		pos = pci_find_ext_capability(dev, cap);
3069	else
3070		pos = pci_find_capability(dev, cap);
3071
3072	if (!pos)
3073		return 0;
3074
3075	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
3076	if (!save_state)
3077		return -ENOMEM;
3078
3079	save_state->cap.cap_nr = cap;
3080	save_state->cap.cap_extended = extended;
3081	save_state->cap.size = size;
3082	pci_add_saved_cap(dev, save_state);
3083
3084	return 0;
3085}
3086
3087int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
3088{
3089	return _pci_add_cap_save_buffer(dev, cap, false, size);
3090}
3091
3092int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
3093{
3094	return _pci_add_cap_save_buffer(dev, cap, true, size);
3095}
3096
3097/**
3098 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
3099 * @dev: the PCI device
3100 */
3101void pci_allocate_cap_save_buffers(struct pci_dev *dev)
3102{
3103	int error;
3104
3105	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
3106					PCI_EXP_SAVE_REGS * sizeof(u16));
3107	if (error)
3108		pci_err(dev, "unable to preallocate PCI Express save buffer\n");
3109
3110	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
3111	if (error)
3112		pci_err(dev, "unable to preallocate PCI-X save buffer\n");
3113
3114	error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_LTR,
3115					    2 * sizeof(u16));
3116	if (error)
3117		pci_err(dev, "unable to allocate suspend buffer for LTR\n");
3118
3119	pci_allocate_vc_save_buffers(dev);
3120}
3121
3122void pci_free_cap_save_buffers(struct pci_dev *dev)
3123{
3124	struct pci_cap_saved_state *tmp;
3125	struct hlist_node *n;
3126
3127	hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
3128		kfree(tmp);
3129}
3130
3131/**
3132 * pci_configure_ari - enable or disable ARI forwarding
3133 * @dev: the PCI device
3134 *
3135 * If @dev and its upstream bridge both support ARI, enable ARI in the
3136 * bridge.  Otherwise, disable ARI in the bridge.
3137 */
3138void pci_configure_ari(struct pci_dev *dev)
3139{
3140	u32 cap;
3141	struct pci_dev *bridge;
3142
3143	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
3144		return;
3145
3146	bridge = dev->bus->self;
3147	if (!bridge)
3148		return;
3149
3150	pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3151	if (!(cap & PCI_EXP_DEVCAP2_ARI))
3152		return;
3153
3154	if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
3155		pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
3156					 PCI_EXP_DEVCTL2_ARI);
3157		bridge->ari_enabled = 1;
3158	} else {
3159		pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
3160					   PCI_EXP_DEVCTL2_ARI);
3161		bridge->ari_enabled = 0;
3162	}
3163}
3164
3165static int pci_acs_enable;
3166
3167/**
3168 * pci_request_acs - ask for ACS to be enabled if supported
3169 */
3170void pci_request_acs(void)
3171{
3172	pci_acs_enable = 1;
3173}
3174
3175static const char *disable_acs_redir_param;
3176
3177/**
3178 * pci_disable_acs_redir - disable ACS redirect capabilities
3179 * @dev: the PCI device
3180 *
3181 * Only for devices specified in the disable_acs_redir parameter.
3182 */
3183static void pci_disable_acs_redir(struct pci_dev *dev)
3184{
3185	int ret = 0;
3186	const char *p;
3187	int pos;
3188	u16 ctrl;
3189
3190	if (!disable_acs_redir_param)
3191		return;
3192
3193	p = disable_acs_redir_param;
3194	while (*p) {
3195		ret = pci_dev_str_match(dev, p, &p);
3196		if (ret < 0) {
3197			pr_info_once("PCI: Can't parse disable_acs_redir parameter: %s\n",
3198				     disable_acs_redir_param);
3199
3200			break;
3201		} else if (ret == 1) {
3202			/* Found a match */
3203			break;
3204		}
3205
3206		if (*p != ';' && *p != ',') {
3207			/* End of param or invalid format */
3208			break;
3209		}
3210		p++;
3211	}
3212
3213	if (ret != 1)
3214		return;
3215
3216	if (!pci_dev_specific_disable_acs_redir(dev))
3217		return;
3218
3219	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3220	if (!pos) {
3221		pci_warn(dev, "cannot disable ACS redirect for this hardware as it does not have ACS capabilities\n");
3222		return;
3223	}
3224
3225	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3226
3227	/* P2P Request & Completion Redirect */
3228	ctrl &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_EC);
3229
3230	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3231
3232	pci_info(dev, "disabled ACS redirect\n");
3233}
3234
3235/**
3236 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
3237 * @dev: the PCI device
3238 */
3239static void pci_std_enable_acs(struct pci_dev *dev)
3240{
3241	int pos;
3242	u16 cap;
3243	u16 ctrl;
3244
3245	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
3246	if (!pos)
3247		return;
3248
3249	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
3250	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
3251
3252	/* Source Validation */
3253	ctrl |= (cap & PCI_ACS_SV);
3254
3255	/* P2P Request Redirect */
3256	ctrl |= (cap & PCI_ACS_RR);
3257
3258	/* P2P Completion Redirect */
3259	ctrl |= (cap & PCI_ACS_CR);
3260
3261	/* Upstream Forwarding */
3262	ctrl |= (cap & PCI_ACS_UF);
3263
3264	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
3265}
3266
3267/**
3268 * pci_enable_acs - enable ACS if the hardware supports it
3269 * @dev: the PCI device
3270 */
3271void pci_enable_acs(struct pci_dev *dev)
3272{
3273	if (!pci_acs_enable)
3274		goto disable_acs_redir;
3275
3276	if (!pci_dev_specific_enable_acs(dev))
3277		goto disable_acs_redir;
3278
3279	pci_std_enable_acs(dev);
3280
3281disable_acs_redir:
3282	/*
3283	 * Note: pci_disable_acs_redir() must be called even if ACS was not
3284	 * enabled by the kernel because it may have been enabled by
3285	 * platform firmware.  So if we are told to disable it, we should
3286	 * always disable it after setting the kernel's default
3287	 * preferences.
3288	 */
3289	pci_disable_acs_redir(dev);
3290}
3291
3292static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
3293{
3294	int pos;
3295	u16 cap, ctrl;
3296
3297	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
3298	if (!pos)
3299		return false;
3300
3301	/*
3302	 * Except for egress control, capabilities are either required
3303	 * or only required if controllable.  Features missing from the
3304	 * capability field can therefore be assumed as hard-wired enabled.
3305	 */
3306	pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
3307	acs_flags &= (cap | PCI_ACS_EC);
3308
3309	pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
3310	return (ctrl & acs_flags) == acs_flags;
3311}
3312
3313/**
3314 * pci_acs_enabled - test ACS against required flags for a given device
3315 * @pdev: device to test
3316 * @acs_flags: required PCI ACS flags
3317 *
3318 * Return true if the device supports the provided flags.  Automatically
3319 * filters out flags that are not implemented on multifunction devices.
3320 *
3321 * Note that this interface checks the effective ACS capabilities of the
3322 * device rather than the actual capabilities.  For instance, most single
3323 * function endpoints are not required to support ACS because they have no
3324 * opportunity for peer-to-peer access.  We therefore return 'true'
3325 * regardless of whether the device exposes an ACS capability.  This makes
3326 * it much easier for callers of this function to ignore the actual type
3327 * or topology of the device when testing ACS support.
3328 */
3329bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
3330{
3331	int ret;
3332
3333	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
3334	if (ret >= 0)
3335		return ret > 0;
3336
3337	/*
3338	 * Conventional PCI and PCI-X devices never support ACS, either
3339	 * effectively or actually.  The shared bus topology implies that
3340	 * any device on the bus can receive or snoop DMA.
3341	 */
3342	if (!pci_is_pcie(pdev))
3343		return false;
3344
3345	switch (pci_pcie_type(pdev)) {
3346	/*
3347	 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
3348	 * but since their primary interface is PCI/X, we conservatively
3349	 * handle them as we would a non-PCIe device.
3350	 */
3351	case PCI_EXP_TYPE_PCIE_BRIDGE:
3352	/*
3353	 * PCIe 3.0, 6.12.1 excludes ACS on these devices.  "ACS is never
3354	 * applicable... must never implement an ACS Extended Capability...".
3355	 * This seems arbitrary, but we take a conservative interpretation
3356	 * of this statement.
3357	 */
3358	case PCI_EXP_TYPE_PCI_BRIDGE:
3359	case PCI_EXP_TYPE_RC_EC:
3360		return false;
3361	/*
3362	 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
3363	 * implement ACS in order to indicate their peer-to-peer capabilities,
3364	 * regardless of whether they are single- or multi-function devices.
3365	 */
3366	case PCI_EXP_TYPE_DOWNSTREAM:
3367	case PCI_EXP_TYPE_ROOT_PORT:
3368		return pci_acs_flags_enabled(pdev, acs_flags);
3369	/*
3370	 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
3371	 * implemented by the remaining PCIe types to indicate peer-to-peer
3372	 * capabilities, but only when they are part of a multifunction
3373	 * device.  The footnote for section 6.12 indicates the specific
3374	 * PCIe types included here.
3375	 */
3376	case PCI_EXP_TYPE_ENDPOINT:
3377	case PCI_EXP_TYPE_UPSTREAM:
3378	case PCI_EXP_TYPE_LEG_END:
3379	case PCI_EXP_TYPE_RC_END:
3380		if (!pdev->multifunction)
3381			break;
3382
3383		return pci_acs_flags_enabled(pdev, acs_flags);
3384	}
3385
3386	/*
3387	 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
3388	 * to single function devices with the exception of downstream ports.
3389	 */
3390	return true;
3391}
3392
3393/**
3394 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
3395 * @start: starting downstream device
3396 * @end: ending upstream device or NULL to search to the root bus
3397 * @acs_flags: required flags
3398 *
3399 * Walk up a device tree from start to end testing PCI ACS support.  If
3400 * any step along the way does not support the required flags, return false.
3401 */
3402bool pci_acs_path_enabled(struct pci_dev *start,
3403			  struct pci_dev *end, u16 acs_flags)
3404{
3405	struct pci_dev *pdev, *parent = start;
3406
3407	do {
3408		pdev = parent;
3409
3410		if (!pci_acs_enabled(pdev, acs_flags))
3411			return false;
3412
3413		if (pci_is_root_bus(pdev->bus))
3414			return (end == NULL);
3415
3416		parent = pdev->bus->self;
3417	} while (pdev != end);
3418
3419	return true;
3420}
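/*
 * Usage sketch: callers such as IOMMU grouping code decide whether a
 * device is isolated by testing the ACS path up to the root bus.  The
 * flag set below is illustrative, not prescriptive.
 */
static bool foo_dev_is_isolated(struct pci_dev *pdev)
{
	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;

	/* A NULL @end walks all the way to the root bus. */
	return pci_acs_path_enabled(pdev, NULL, flags);
}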
3421
3422/**
3423 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3424 * @pdev: PCI device
3425 * @bar: BAR to find
3426 *
3427 * Helper to find the position of the ctrl register for a BAR.
3428 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3429 * Returns -ENOENT if no ctrl register for the BAR could be found.
3430 */
3431static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3432{
3433	unsigned int pos, nbars, i;
3434	u32 ctrl;
3435
3436	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3437	if (!pos)
3438		return -ENOTSUPP;
3439
3440	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3441	nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3442		    PCI_REBAR_CTRL_NBAR_SHIFT;
3443
3444	for (i = 0; i < nbars; i++, pos += 8) {
3445		int bar_idx;
3446
3447		pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3448		bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3449		if (bar_idx == bar)
3450			return pos;
3451	}
3452
3453	return -ENOENT;
3454}
3455
3456/**
3457 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3458 * @pdev: PCI device
3459 * @bar: BAR to query
3460 *
3461 * Get the possible sizes of a resizable BAR as bitmask defined in the spec
3462 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3463 */
3464u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3465{
3466	int pos;
3467	u32 cap;
3468
3469	pos = pci_rebar_find_pos(pdev, bar);
3470	if (pos < 0)
3471		return 0;
3472
3473	pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3474	return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3475}
3476
3477/**
3478 * pci_rebar_get_current_size - get the current size of a BAR
3479 * @pdev: PCI device
3480 * @bar: BAR to query
3481 *
3482 * Read the size of a BAR from the resizable BAR config.
3483 * Returns size if found or negative error code.
3484 */
3485int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3486{
3487	int pos;
3488	u32 ctrl;
3489
3490	pos = pci_rebar_find_pos(pdev, bar);
3491	if (pos < 0)
3492		return pos;
3493
3494	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3495	return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;
3496}
3497
3498/**
3499 * pci_rebar_set_size - set a new size for a BAR
3500 * @pdev: PCI device
3501 * @bar: BAR to set size to
3502 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3503 *
3504 * Set the new size of a BAR as defined in the spec.
3505 * Returns zero if resizing was successful, error code otherwise.
3506 */
3507int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3508{
3509	int pos;
3510	u32 ctrl;
3511
3512	pos = pci_rebar_find_pos(pdev, bar);
3513	if (pos < 0)
3514		return pos;
3515
3516	pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3517	ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3518	ctrl |= size << PCI_REBAR_CTRL_BAR_SHIFT;
3519	pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3520	return 0;
3521}
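/*
 * Sketch of how these helpers combine (they are internal to the PCI core;
 * see pci_resize_resource() for the real user).  A real resize must also
 * release and re-allocate the BAR around the control register update.
 */
static int foo_grow_bar(struct pci_dev *pdev, int bar)
{
	u32 sizes = pci_rebar_get_possible_sizes(pdev, bar);

	if (!sizes)
		return -ENOTSUPP;

	/* Bit 0 = 1MB ... bit 19 = 512GB; pick the largest supported size. */
	return pci_rebar_set_size(pdev, bar, fls(sizes) - 1);
}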
3522
3523/**
3524 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3525 * @dev: the PCI device
3526 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3527 *	PCI_EXP_DEVCAP2_ATOMIC_COMP32
3528 *	PCI_EXP_DEVCAP2_ATOMIC_COMP64
3529 *	PCI_EXP_DEVCAP2_ATOMIC_COMP128
3530 *
3531 * Return 0 if all upstream bridges support AtomicOp routing, egress
3532 * blocking is disabled on all upstream ports, and the root port supports
3533 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3534 * AtomicOp completion), or negative otherwise.
3535 */
3536int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3537{
3538	struct pci_bus *bus = dev->bus;
3539	struct pci_dev *bridge;
3540	u32 cap, ctl2;
3541
3542	if (!pci_is_pcie(dev))
3543		return -EINVAL;
3544
3545	/*
3546	 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3547	 * AtomicOp requesters.  For now, we only support endpoints as
3548	 * requesters and root ports as completers.  No endpoints as
3549	 * completers, and no peer-to-peer.
3550	 */
3551
3552	switch (pci_pcie_type(dev)) {
3553	case PCI_EXP_TYPE_ENDPOINT:
3554	case PCI_EXP_TYPE_LEG_END:
3555	case PCI_EXP_TYPE_RC_END:
3556		break;
3557	default:
3558		return -EINVAL;
3559	}
3560
3561	while (bus->parent) {
3562		bridge = bus->self;
3563
3564		pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3565
3566		switch (pci_pcie_type(bridge)) {
3567		/* Ensure switch ports support AtomicOp routing */
3568		case PCI_EXP_TYPE_UPSTREAM:
3569		case PCI_EXP_TYPE_DOWNSTREAM:
3570			if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3571				return -EINVAL;
3572			break;
3573
3574		/* Ensure root port supports all the sizes we care about */
3575		case PCI_EXP_TYPE_ROOT_PORT:
3576			if ((cap & cap_mask) != cap_mask)
3577				return -EINVAL;
3578			break;
3579		}
3580
3581		/* Ensure upstream ports don't block AtomicOps on egress */
3582		if (pci_pcie_type(bridge) == PCI_EXP_TYPE_UPSTREAM) {
3583			pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3584						   &ctl2);
3585			if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3586				return -EINVAL;
3587		}
3588
3589		bus = bus->parent;
3590	}
3591
3592	pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3593				 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3594	return 0;
3595}
3596EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
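/*
 * Usage sketch (hypothetical endpoint driver): request 64-bit AtomicOp
 * completion along the path to the root port before issuing AtomicOp
 * requests; fall back gracefully if the topology can't route them.
 */
static int foo_init_atomic_ops(struct pci_dev *pdev)
{
	int rc = pci_enable_atomic_ops_to_root(pdev,
					       PCI_EXP_DEVCAP2_ATOMIC_COMP64);

	if (rc)
		pci_info(pdev, "64-bit AtomicOps not available\n");
	return rc;
}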
3597
3598/**
3599 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3600 * @dev: the PCI device
3601 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3602 *
3603 * Perform INTx swizzling for a device behind one level of bridge.  This is
3604 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
3605 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
3606 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3607 * the PCI Express Base Specification, Revision 2.1)
3608 */
3609u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
3610{
3611	int slot;
3612
3613	if (pci_ari_enabled(dev->bus))
3614		slot = 0;
3615	else
3616		slot = PCI_SLOT(dev->devfn);
3617
3618	return (((pin - 1) + slot) % 4) + 1;
3619}
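/*
 * Worked example of the swizzle above: a device in slot 2 asserting INTB
 * (pin 2) behind one bridge yields (((2 - 1) + 2) % 4) + 1 = 4, i.e. the
 * interrupt appears as INTD on the bridge's primary side.
 */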
3620
3621int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
3622{
3623	u8 pin;
3624
3625	pin = dev->pin;
3626	if (!pin)
3627		return -1;
3628
3629	while (!pci_is_root_bus(dev->bus)) {
3630		pin = pci_swizzle_interrupt_pin(dev, pin);
3631		dev = dev->bus->self;
3632	}
3633	*bridge = dev;
3634	return pin;
3635}
3636
3637/**
3638 * pci_common_swizzle - swizzle INTx all the way to root bridge
3639 * @dev: the PCI device
3640 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3641 *
3642 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
3643 * bridges all the way up to a PCI root bus.
3644 */
3645u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3646{
3647	u8 pin = *pinp;
3648
3649	while (!pci_is_root_bus(dev->bus)) {
3650		pin = pci_swizzle_interrupt_pin(dev, pin);
3651		dev = dev->bus->self;
3652	}
3653	*pinp = pin;
3654	return PCI_SLOT(dev->devfn);
3655}
3656EXPORT_SYMBOL_GPL(pci_common_swizzle);
3657
3658/**
3659 * pci_release_region - Release a PCI BAR
3660 * @pdev: PCI device whose resources were previously reserved by
3661 *	  pci_request_region()
3662 * @bar: BAR to release
3663 *
3664 * Releases the PCI I/O and memory resources previously reserved by a
3665 * successful call to pci_request_region().  Call this function only
3666 * after all use of the PCI regions has ceased.
3667 */
3668void pci_release_region(struct pci_dev *pdev, int bar)
3669{
3670	struct pci_devres *dr;
3671
3672	if (pci_resource_len(pdev, bar) == 0)
3673		return;
3674	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3675		release_region(pci_resource_start(pdev, bar),
3676				pci_resource_len(pdev, bar));
3677	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3678		release_mem_region(pci_resource_start(pdev, bar),
3679				pci_resource_len(pdev, bar));
3680
3681	dr = find_pci_dr(pdev);
3682	if (dr)
3683		dr->region_mask &= ~(1 << bar);
3684}
3685EXPORT_SYMBOL(pci_release_region);
3686
3687/**
3688 * __pci_request_region - Reserve PCI I/O and memory resource
3689 * @pdev: PCI device whose resources are to be reserved
3690 * @bar: BAR to be reserved
3691 * @res_name: Name to be associated with resource.
3692 * @exclusive: whether the region access is exclusive or not
3693 *
3694 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3695 * being reserved by owner @res_name.  Do not access any
3696 * address inside the PCI regions unless this call returns
3697 * successfully.
3698 *
3699 * If @exclusive is set, then the region is marked so that userspace
3700 * is explicitly not allowed to map the resource via /dev/mem or
3701 * sysfs MMIO access.
3702 *
3703 * Returns 0 on success, or %EBUSY on error.  A warning
3704 * message is also printed on failure.
3705 */
3706static int __pci_request_region(struct pci_dev *pdev, int bar,
3707				const char *res_name, int exclusive)
3708{
3709	struct pci_devres *dr;
3710
3711	if (pci_resource_len(pdev, bar) == 0)
3712		return 0;
3713
3714	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3715		if (!request_region(pci_resource_start(pdev, bar),
3716			    pci_resource_len(pdev, bar), res_name))
3717			goto err_out;
3718	} else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
3719		if (!__request_mem_region(pci_resource_start(pdev, bar),
3720					pci_resource_len(pdev, bar), res_name,
3721					exclusive))
3722			goto err_out;
3723	}
3724
3725	dr = find_pci_dr(pdev);
3726	if (dr)
3727		dr->region_mask |= 1 << bar;
3728
3729	return 0;
3730
3731err_out:
3732	pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
3733		 &pdev->resource[bar]);
3734	return -EBUSY;
3735}
3736
3737/**
3738 * pci_request_region - Reserve PCI I/O and memory resource
3739 * @pdev: PCI device whose resources are to be reserved
3740 * @bar: BAR to be reserved
3741 * @res_name: Name to be associated with resource
3742 *
3743 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3744 * being reserved by owner @res_name.  Do not access any
3745 * address inside the PCI regions unless this call returns
3746 * successfully.
3747 *
3748 * Returns 0 on success, or %EBUSY on error.  A warning
3749 * message is also printed on failure.
3750 */
3751int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3752{
3753	return __pci_request_region(pdev, bar, res_name, 0);
3754}
3755EXPORT_SYMBOL(pci_request_region);
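
/*
 * Usage sketch (illustrative; the "mydrv" name and BAR number are made
 * up): a driver typically reserves a BAR in probe and releases it on the
 * teardown path:
 *
 *	if (pci_request_region(pdev, 0, "mydrv"))
 *		return -EBUSY;
 *	...
 *	pci_release_region(pdev, 0);
 */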
3756
3757/**
3758 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3759 * @pdev: PCI device whose resources were previously reserved
3760 * @bars: Bitmask of BARs to be released
3761 *
3762 * Release selected PCI I/O and memory resources previously reserved.
3763 * Call this function only after all use of the PCI regions has ceased.
3764 */
3765void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3766{
3767	int i;
3768
3769	for (i = 0; i < 6; i++)
3770		if (bars & (1 << i))
3771			pci_release_region(pdev, i);
3772}
3773EXPORT_SYMBOL(pci_release_selected_regions);
3774
3775static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3776					  const char *res_name, int excl)
3777{
3778	int i;
3779
3780	for (i = 0; i < 6; i++)
3781		if (bars & (1 << i))
3782			if (__pci_request_region(pdev, i, res_name, excl))
3783				goto err_out;
3784	return 0;
3785
3786err_out:
3787	while (--i >= 0)
3788		if (bars & (1 << i))
3789			pci_release_region(pdev, i);
3790
3791	return -EBUSY;
3792}
3793
3794
3795/**
3796 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3797 * @pdev: PCI device whose resources are to be reserved
3798 * @bars: Bitmask of BARs to be requested
3799 * @res_name: Name to be associated with resource
3800 */
3801int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3802				 const char *res_name)
3803{
3804	return __pci_request_selected_regions(pdev, bars, res_name, 0);
3805}
3806EXPORT_SYMBOL(pci_request_selected_regions);
3807
3808int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3809					   const char *res_name)
3810{
3811	return __pci_request_selected_regions(pdev, bars, res_name,
3812			IORESOURCE_EXCLUSIVE);
3813}
3814EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3815
3816/**
3817 * pci_release_regions - Release reserved PCI I/O and memory resources
3818 * @pdev: PCI device whose resources were previously reserved by
3819 *	  pci_request_regions()
3820 *
3821 * Releases all PCI I/O and memory resources previously reserved by a
3822 * successful call to pci_request_regions().  Call this function only
3823 * after all use of the PCI regions has ceased.
3824 */
3825
3826void pci_release_regions(struct pci_dev *pdev)
3827{
3828	pci_release_selected_regions(pdev, (1 << 6) - 1);
3829}
3830EXPORT_SYMBOL(pci_release_regions);
3831
3832/**
3833 * pci_request_regions - Reserve PCI I/O and memory resources
3834 * @pdev: PCI device whose resources are to be reserved
3835 * @res_name: Name to be associated with resource.
3836 *
3837 * Mark all PCI regions associated with PCI device @pdev as
3838 * being reserved by owner @res_name.  Do not access any
3839 * address inside the PCI regions unless this call returns
3840 * successfully.
3841 *
3842 * Returns 0 on success, or %EBUSY on error.  A warning
3843 * message is also printed on failure.
3844 */
3845int pci_request_regions(struct pci_dev *pdev, const char *res_name)
3846{
3847	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
3848}
3849EXPORT_SYMBOL(pci_request_regions);
3850
3851/**
3852 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3853 * @pdev: PCI device whose resources are to be reserved
3854 * @res_name: Name to be associated with resource.
3855 *
3856 * Mark all PCI regions associated with PCI device @pdev as being reserved
3857 * by owner @res_name.  Do not access any address inside the PCI regions
3858 * unless this call returns successfully.
3859 *
3860 * pci_request_regions_exclusive() will mark the region so that /dev/mem
3861 * and the sysfs MMIO access will not be allowed.
3862 *
3863 * Returns 0 on success, or %EBUSY on error.  A warning message is also
3864 * printed on failure.
3865 */
3866int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3867{
3868	return pci_request_selected_regions_exclusive(pdev,
3869					((1 << 6) - 1), res_name);
3870}
3871EXPORT_SYMBOL(pci_request_regions_exclusive);
3872
3873/*
3874 * Record the PCI IO range (expressed as CPU physical address + size).
3875 * Return a negative value if an error has occurred, zero otherwise
3876 */
3877int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3878			resource_size_t	size)
3879{
3880	int ret = 0;
3881#ifdef PCI_IOBASE
3882	struct logic_pio_hwaddr *range;
3883
3884	if (!size || addr + size < addr)
3885		return -EINVAL;
3886
3887	range = kzalloc(sizeof(*range), GFP_ATOMIC);
3888	if (!range)
3889		return -ENOMEM;
3890
3891	range->fwnode = fwnode;
3892	range->size = size;
3893	range->hw_start = addr;
3894	range->flags = LOGIC_PIO_CPU_MMIO;
3895
3896	ret = logic_pio_register_range(range);
3897	if (ret)
3898		kfree(range);
3899#endif
3900
3901	return ret;
3902}
3903
3904phys_addr_t pci_pio_to_address(unsigned long pio)
3905{
3906	phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3907
3908#ifdef PCI_IOBASE
3909	if (pio >= MMIO_UPPER_LIMIT)
3910		return address;
3911
3912	address = logic_pio_to_hwaddr(pio);
3913#endif
3914
3915	return address;
3916}
3917
3918unsigned long __weak pci_address_to_pio(phys_addr_t address)
3919{
3920#ifdef PCI_IOBASE
3921	return logic_pio_trans_cpuaddr(address);
3922#else
3923	if (address > IO_SPACE_LIMIT)
3924		return (unsigned long)-1;
3925
3926	return (unsigned long) address;
3927#endif
3928}
3929
3930/**
3931 * pci_remap_iospace - Remap the memory mapped I/O space
3932 * @res: Resource describing the I/O space
3933 * @phys_addr: physical address of range to be mapped
3934 *
3935 * Remap the memory mapped I/O space described by the @res and the CPU
3936 * physical address @phys_addr into virtual address space.  Only
3937 * architectures that have memory mapped IO functions defined (and the
3938 * PCI_IOBASE value defined) should call this function.
3939 */
3940int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
3941{
3942#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3943	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3944
3945	if (!(res->flags & IORESOURCE_IO))
3946		return -EINVAL;
3947
3948	if (res->end > IO_SPACE_LIMIT)
3949		return -EINVAL;
3950
3951	return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3952				  pgprot_device(PAGE_KERNEL));
3953#else
3954	/*
3955	 * This architecture does not have memory mapped I/O space,
3956	 * so this function should never be called
3957	 */
3958	WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3959	return -ENODEV;
3960#endif
3961}
3962EXPORT_SYMBOL(pci_remap_iospace);
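
/*
 * Usage sketch for a host bridge driver (illustrative; assumes @res is an
 * IORESOURCE_IO resource and @pio_base is the CPU physical address backing
 * it):
 *
 *	err = pci_remap_iospace(res, pio_base);
 *	if (err)
 *		return err;
 */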
3963
3964/**
3965 * pci_unmap_iospace - Unmap the memory mapped I/O space
3966 * @res: resource to be unmapped
3967 *
3968 * Unmap the CPU virtual address @res from virtual address space.  Only
3969 * architectures that have memory mapped IO functions defined (and the
3970 * PCI_IOBASE value defined) should call this function.
3971 */
3972void pci_unmap_iospace(struct resource *res)
3973{
3974#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3975	unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3976
3977	unmap_kernel_range(vaddr, resource_size(res));
3978#endif
3979}
3980EXPORT_SYMBOL(pci_unmap_iospace);
3981
3982static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
3983{
3984	struct resource **res = ptr;
3985
3986	pci_unmap_iospace(*res);
3987}
3988
3989/**
3990 * devm_pci_remap_iospace - Managed pci_remap_iospace()
3991 * @dev: Generic device to remap IO address for
3992 * @res: Resource describing the I/O space
3993 * @phys_addr: physical address of range to be mapped
3994 *
3995 * Managed pci_remap_iospace().  Map is automatically unmapped on driver
3996 * detach.
3997 */
3998int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
3999			   phys_addr_t phys_addr)
4000{
4001	const struct resource **ptr;
4002	int error;
4003
4004	ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
4005	if (!ptr)
4006		return -ENOMEM;
4007
4008	error = pci_remap_iospace(res, phys_addr);
4009	if (error) {
4010		devres_free(ptr);
4011	} else	{
4012		*ptr = res;
4013		devres_add(dev, ptr);
4014	}
4015
4016	return error;
4017}
4018EXPORT_SYMBOL(devm_pci_remap_iospace);
4019
4020/**
4021 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
4022 * @dev: Generic device to remap IO address for
4023 * @offset: Resource address to map
4024 * @size: Size of map
4025 *
4026 * Managed pci_remap_cfgspace().  Map is automatically unmapped on driver
4027 * detach.
4028 */
4029void __iomem *devm_pci_remap_cfgspace(struct device *dev,
4030				      resource_size_t offset,
4031				      resource_size_t size)
4032{
4033	void __iomem **ptr, *addr;
4034
4035	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
4036	if (!ptr)
4037		return NULL;
4038
4039	addr = pci_remap_cfgspace(offset, size);
4040	if (addr) {
4041		*ptr = addr;
4042		devres_add(dev, ptr);
4043	} else
4044		devres_free(ptr);
4045
4046	return addr;
4047}
4048EXPORT_SYMBOL(devm_pci_remap_cfgspace);
4049
4050/**
4051 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
4052 * @dev: generic device to handle the resource for
4053 * @res: configuration space resource to be handled
4054 *
4055 * Checks that a resource is a valid memory region, requests the memory
4056 * region and ioremaps it with the pci_remap_cfgspace() API, which
4057 * ensures the proper PCI configuration space memory attributes.
4058 *
4059 * All operations are managed and will be undone on driver detach.
4060 *
4061 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
4062 * on failure. Usage example::
4063 *
4064 *	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4065 *	base = devm_pci_remap_cfg_resource(&pdev->dev, res);
4066 *	if (IS_ERR(base))
4067 *		return PTR_ERR(base);
4068 */
4069void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
4070					  struct resource *res)
4071{
4072	resource_size_t size;
4073	const char *name;
4074	void __iomem *dest_ptr;
4075
4076	BUG_ON(!dev);
4077
4078	if (!res || resource_type(res) != IORESOURCE_MEM) {
4079		dev_err(dev, "invalid resource\n");
4080		return IOMEM_ERR_PTR(-EINVAL);
4081	}
4082
4083	size = resource_size(res);
4084	name = res->name ?: dev_name(dev);
4085
4086	if (!devm_request_mem_region(dev, res->start, size, name)) {
4087		dev_err(dev, "can't request region for resource %pR\n", res);
4088		return IOMEM_ERR_PTR(-EBUSY);
4089	}
4090
4091	dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
4092	if (!dest_ptr) {
4093		dev_err(dev, "ioremap failed for resource %pR\n", res);
4094		devm_release_mem_region(dev, res->start, size);
4095		dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
4096	}
4097
4098	return dest_ptr;
4099}
4100EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
4101
4102static void __pci_set_master(struct pci_dev *dev, bool enable)
4103{
4104	u16 old_cmd, cmd;
4105
4106	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
4107	if (enable)
4108		cmd = old_cmd | PCI_COMMAND_MASTER;
4109	else
4110		cmd = old_cmd & ~PCI_COMMAND_MASTER;
4111	if (cmd != old_cmd) {
4112		pci_dbg(dev, "%s bus mastering\n",
4113			enable ? "enabling" : "disabling");
4114		pci_write_config_word(dev, PCI_COMMAND, cmd);
4115	}
4116	dev->is_busmaster = enable;
4117}
4118
4119/**
4120 * pcibios_setup - process "pci=" kernel boot arguments
4121 * @str: string used to pass in "pci=" kernel boot arguments
4122 *
4123 * Process kernel boot arguments.  This is the default implementation.
4124 * Architecture specific implementations can override this as necessary.
4125 */
4126char * __weak __init pcibios_setup(char *str)
4127{
4128	return str;
4129}
4130
4131/**
4132 * pcibios_set_master - enable PCI bus-mastering for device dev
4133 * @dev: the PCI device to enable
4134 *
4135 * Enables PCI bus-mastering for the device.  This is the default
4136 * implementation.  Architecture specific implementations can override
4137 * this if necessary.
4138 */
4139void __weak pcibios_set_master(struct pci_dev *dev)
4140{
4141	u8 lat;
4142
4143	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
4144	if (pci_is_pcie(dev))
4145		return;
4146
4147	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
4148	if (lat < 16)
4149		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
4150	else if (lat > pcibios_max_latency)
4151		lat = pcibios_max_latency;
4152	else
4153		return;
4154
4155	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
4156}
4157
4158/**
4159 * pci_set_master - enables bus-mastering for device dev
4160 * @dev: the PCI device to enable
4161 *
4162 * Enables bus-mastering on the device and calls pcibios_set_master()
4163 * to do the needed arch specific settings.
4164 */
4165void pci_set_master(struct pci_dev *dev)
4166{
4167	__pci_set_master(dev, true);
4168	pcibios_set_master(dev);
4169}
4170EXPORT_SYMBOL(pci_set_master);
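
/*
 * Typical probe-time sequence (illustrative sketch): enable the device
 * first, then allow it to initiate DMA:
 *
 *	rc = pci_enable_device(pdev);
 *	if (rc)
 *		return rc;
 *	pci_set_master(pdev);
 */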
4171
4172/**
4173 * pci_clear_master - disables bus-mastering for device dev
4174 * @dev: the PCI device to disable
4175 */
4176void pci_clear_master(struct pci_dev *dev)
4177{
4178	__pci_set_master(dev, false);
4179}
4180EXPORT_SYMBOL(pci_clear_master);
4181
4182/**
4183 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
4184 * @dev: the PCI device for which MWI is to be enabled
4185 *
4186 * Helper function for pci_set_mwi.
4187 * Originally copied from drivers/net/acenic.c.
4188 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
4189 *
4190 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4191 */
4192int pci_set_cacheline_size(struct pci_dev *dev)
4193{
4194	u8 cacheline_size;
4195
4196	if (!pci_cache_line_size)
4197		return -EINVAL;
4198
4199	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
4200	   equal to or a multiple of the right value. */
4201	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4202	if (cacheline_size >= pci_cache_line_size &&
4203	    (cacheline_size % pci_cache_line_size) == 0)
4204		return 0;
4205
4206	/* Write the correct value. */
4207	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
4208	/* Read it back. */
4209	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
4210	if (cacheline_size == pci_cache_line_size)
4211		return 0;
4212
4213	pci_info(dev, "cache line size of %d is not supported\n",
4214		   pci_cache_line_size << 2);
4215
4216	return -EINVAL;
4217}
4218EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
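
/*
 * Note that PCI_CACHE_LINE_SIZE is programmed in 32-bit dwords, which is
 * why the message above prints pci_cache_line_size << 2 bytes; e.g. a
 * 64-byte cache line is written to the register as 16.
 */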
4219
4220/**
4221 * pci_set_mwi - enables memory-write-invalidate PCI transaction
4222 * @dev: the PCI device for which MWI is enabled
4223 *
4224 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4225 *
4226 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4227 */
4228int pci_set_mwi(struct pci_dev *dev)
4229{
4230#ifdef PCI_DISABLE_MWI
4231	return 0;
4232#else
4233	int rc;
4234	u16 cmd;
4235
4236	rc = pci_set_cacheline_size(dev);
4237	if (rc)
4238		return rc;
4239
4240	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4241	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
4242		pci_dbg(dev, "enabling Mem-Wr-Inval\n");
4243		cmd |= PCI_COMMAND_INVALIDATE;
4244		pci_write_config_word(dev, PCI_COMMAND, cmd);
4245	}
4246	return 0;
4247#endif
4248}
4249EXPORT_SYMBOL(pci_set_mwi);
4250
4251/**
4252 * pcim_set_mwi - a device-managed pci_set_mwi()
4253 * @dev: the PCI device for which MWI is enabled
4254 *
4255 * Managed pci_set_mwi().
4256 *
4257 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4258 */
4259int pcim_set_mwi(struct pci_dev *dev)
4260{
4261	struct pci_devres *dr;
4262
4263	dr = find_pci_dr(dev);
4264	if (!dr)
4265		return -ENOMEM;
4266
4267	dr->mwi = 1;
4268	return pci_set_mwi(dev);
4269}
4270EXPORT_SYMBOL(pcim_set_mwi);
4271
4272/**
4273 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
4274 * @dev: the PCI device for which MWI is enabled
4275 *
4276 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
4277 * Callers are not required to check the return value.
4278 *
4279 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
4280 */
4281int pci_try_set_mwi(struct pci_dev *dev)
4282{
4283#ifdef PCI_DISABLE_MWI
4284	return 0;
4285#else
4286	return pci_set_mwi(dev);
4287#endif
4288}
4289EXPORT_SYMBOL(pci_try_set_mwi);
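
/*
 * Since MWI is only a performance hint, a typical caller simply ignores
 * the result (illustrative sketch):
 *
 *	pci_try_set_mwi(pdev);
 */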
4290
4291/**
4292 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
4293 * @dev: the PCI device to disable
4294 *
4295 * Disables PCI Memory-Write-Invalidate transaction on the device
4296 */
4297void pci_clear_mwi(struct pci_dev *dev)
4298{
4299#ifndef PCI_DISABLE_MWI
4300	u16 cmd;
4301
4302	pci_read_config_word(dev, PCI_COMMAND, &cmd);
4303	if (cmd & PCI_COMMAND_INVALIDATE) {
4304		cmd &= ~PCI_COMMAND_INVALIDATE;
4305		pci_write_config_word(dev, PCI_COMMAND, cmd);
4306	}
4307#endif
4308}
4309EXPORT_SYMBOL(pci_clear_mwi);
4310
4311/**
4312 * pci_intx - enables/disables PCI INTx for device dev
4313 * @pdev: the PCI device to operate on
4314 * @enable: boolean: whether to enable or disable PCI INTx
4315 *
4316 * Enables/disables PCI INTx for device @pdev
4317 */
4318void pci_intx(struct pci_dev *pdev, int enable)
4319{
4320	u16 pci_command, new;
4321
4322	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
4323
4324	if (enable)
4325		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
4326	else
4327		new = pci_command | PCI_COMMAND_INTX_DISABLE;
4328
4329	if (new != pci_command) {
4330		struct pci_devres *dr;
4331
4332		pci_write_config_word(pdev, PCI_COMMAND, new);
4333
4334		dr = find_pci_dr(pdev);
4335		if (dr && !dr->restore_intx) {
4336			dr->restore_intx = 1;
4337			dr->orig_intx = !enable;
4338		}
4339	}
4340}
4341EXPORT_SYMBOL_GPL(pci_intx);
4342
4343static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
4344{
4345	struct pci_bus *bus = dev->bus;
4346	bool mask_updated = true;
4347	u32 cmd_status_dword;
4348	u16 origcmd, newcmd;
4349	unsigned long flags;
4350	bool irq_pending;
4351
4352	/*
4353	 * We do a single dword read to retrieve both command and status.
4354	 * Document assumptions that make this possible.
4355	 */
4356	BUILD_BUG_ON(PCI_COMMAND % 4);
4357	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
4358
4359	raw_spin_lock_irqsave(&pci_lock, flags);
4360
4361	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
4362
4363	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
4364
4365	/*
4366	 * Check interrupt status register to see whether our device
4367	 * triggered the interrupt (when masking) or the next IRQ is
4368	 * already pending (when unmasking).
4369	 */
4370	if (mask != irq_pending) {
4371		mask_updated = false;
4372		goto done;
4373	}
4374
4375	origcmd = cmd_status_dword;
4376	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
4377	if (mask)
4378		newcmd |= PCI_COMMAND_INTX_DISABLE;
4379	if (newcmd != origcmd)
4380		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
4381
4382done:
4383	raw_spin_unlock_irqrestore(&pci_lock, flags);
4384
4385	return mask_updated;
4386}
4387
4388/**
4389 * pci_check_and_mask_intx - mask INTx on pending interrupt
4390 * @dev: the PCI device to operate on
4391 *
4392 * Check if the device dev has its INTx line asserted, mask it and return
4393 * true in that case. False is returned if no interrupt was pending.
4394 */
4395bool pci_check_and_mask_intx(struct pci_dev *dev)
4396{
4397	return pci_check_and_set_intx_mask(dev, true);
4398}
4399EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
4400
4401/**
4402 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
4403 * @dev: the PCI device to operate on
4404 *
4405 * Check if the device dev has its INTx line asserted, unmask it if not and
4406 * return true. False is returned and the mask remains active if there was
4407 * still an interrupt pending.
4408 */
4409bool pci_check_and_unmask_intx(struct pci_dev *dev)
4410{
4411	return pci_check_and_set_intx_mask(dev, false);
4412}
4413EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
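
/*
 * Sketch of a shared INTx handler built on the two helpers above
 * (illustrative; the handler name is made up).  The device is left masked
 * until the driver has serviced it:
 *
 *	static irqreturn_t mydrv_irq(int irq, void *data)
 *	{
 *		struct pci_dev *pdev = data;
 *
 *		if (!pci_check_and_mask_intx(pdev))
 *			return IRQ_NONE;
 *		... service the device, then ...
 *		pci_check_and_unmask_intx(pdev);
 *		return IRQ_HANDLED;
 *	}
 */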
4414
4415/**
4416 * pci_wait_for_pending_transaction - wait for pending transaction
4417 * @dev: the PCI device to operate on
4418 *
4419 * Return 0 if transaction is pending, 1 otherwise.
4420 */
4421int pci_wait_for_pending_transaction(struct pci_dev *dev)
4422{
4423	if (!pci_is_pcie(dev))
4424		return 1;
4425
4426	return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
4427				    PCI_EXP_DEVSTA_TRPND);
4428}
4429EXPORT_SYMBOL(pci_wait_for_pending_transaction);
4430
4431static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
4432{
4433	int delay = 1;
4434	u32 id;
4435
4436	/*
4437	 * After reset, the device should not silently discard config
4438	 * requests, but it may still indicate that it needs more time by
4439	 * responding to them with CRS completions.  The Root Port will
4440	 * generally synthesize ~0 data to complete the read (except when
4441	 * CRS SV is enabled and the read was for the Vendor ID; in that
4442	 * case it synthesizes 0x0001 data).
4443	 *
4444	 * Wait for the device to return a non-CRS completion.  Read the
4445	 * Command register instead of Vendor ID so we don't have to
4446	 * contend with the CRS SV value.
4447	 */
4448	pci_read_config_dword(dev, PCI_COMMAND, &id);
4449	while (id == ~0) {
4450		if (delay > timeout) {
4451			pci_warn(dev, "not ready %dms after %s; giving up\n",
4452				 delay - 1, reset_type);
4453			return -ENOTTY;
4454		}
4455
4456		if (delay > 1000)
4457			pci_info(dev, "not ready %dms after %s; waiting\n",
4458				 delay - 1, reset_type);
4459
4460		msleep(delay);
4461		delay *= 2;
4462		pci_read_config_dword(dev, PCI_COMMAND, &id);
4463	}
4464
4465	if (delay > 1000)
4466		pci_info(dev, "ready %dms after %s\n", delay - 1,
4467			 reset_type);
4468
4469	return 0;
4470}
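
/*
 * Note that the wait above backs off exponentially (1, 2, 4, ... ms), so
 * the total time slept is bounded by roughly twice @timeout.
 */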
4471
4472/**
4473 * pcie_has_flr - check if a device supports function level resets
4474 * @dev: device to check
4475 *
4476 * Returns true if the device advertises support for PCIe function level
4477 * resets.
4478 */
4479bool pcie_has_flr(struct pci_dev *dev)
4480{
4481	u32 cap;
4482
4483	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4484		return false;
4485
4486	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4487	return cap & PCI_EXP_DEVCAP_FLR;
4488}
4489EXPORT_SYMBOL_GPL(pcie_has_flr);
4490
4491/**
4492 * pcie_flr - initiate a PCIe function level reset
4493 * @dev: device to reset
4494 *
4495 * Initiate a function level reset on @dev.  The caller should ensure the
4496 * device supports FLR before calling this function, e.g. by using the
4497 * pcie_has_flr() helper.
4498 */
4499int pcie_flr(struct pci_dev *dev)
4500{
4501	if (!pci_wait_for_pending_transaction(dev))
4502		pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
4503
4504	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
4505
4506	if (dev->imm_ready)
4507		return 0;
4508
4509	/*
4510	 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4511	 * 100ms, but may silently discard requests while the FLR is in
4512	 * progress.  Wait 100ms before trying to access the device.
4513	 */
4514	msleep(100);
4515
4516	return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
4517}
4518EXPORT_SYMBOL_GPL(pcie_flr);
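
/*
 * Callers are expected to probe for support first, e.g. (illustrative):
 *
 *	if (pcie_has_flr(pdev))
 *		pcie_flr(pdev);
 */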
4519
4520static int pci_af_flr(struct pci_dev *dev, int probe)
4521{
4522	int pos;
4523	u8 cap;
4524
4525	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4526	if (!pos)
4527		return -ENOTTY;
4528
4529	if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4530		return -ENOTTY;
4531
4532	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
4533	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4534		return -ENOTTY;
4535
4536	if (probe)
4537		return 0;
4538
4539	/*
4540	 * Wait for Transaction Pending bit to clear.  A word-aligned test
4541	 * is used, so we use the control offset rather than status and shift
4542	 * the test bit to match.
4543	 */
4544	if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
4545				 PCI_AF_STATUS_TP << 8))
4546		pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
4547
4548	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
4549
4550	if (dev->imm_ready)
4551		return 0;
4552
4553	/*
4554	 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4555	 * updated 27 July 2006; a device must complete an FLR within
4556	 * 100ms, but may silently discard requests while the FLR is in
4557	 * progress.  Wait 100ms before trying to access the device.
4558	 */
4559	msleep(100);
4560
4561	return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
4562}
4563
4564/**
4565 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4566 * @dev: Device to reset.
4567 * @probe: If set, only check if the device can be reset this way.
4568 *
4569 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4570 * unset, it will be reinitialized internally when going from PCI_D3hot to
4571 * PCI_D0.  If that's the case and the device is not in a low-power state
4572 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4573 *
4574 * NOTE: This causes the caller to sleep for twice the device power transition
4575 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
4576 * by default (i.e. unless the @dev's d3_delay field has a different value).
4577 * Moreover, only devices in D0 can be reset by this function.
4578 */
4579static int pci_pm_reset(struct pci_dev *dev, int probe)
4580{
4581	u16 csr;
4582
4583	if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
4584		return -ENOTTY;
4585
4586	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4587	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4588		return -ENOTTY;
4589
4590	if (probe)
4591		return 0;
4592
4593	if (dev->current_state != PCI_D0)
4594		return -EINVAL;
4595
4596	csr &= ~PCI_PM_CTRL_STATE_MASK;
4597	csr |= PCI_D3hot;
4598	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4599	pci_dev_d3_sleep(dev);
4600
4601	csr &= ~PCI_PM_CTRL_STATE_MASK;
4602	csr |= PCI_D0;
4603	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
4604	pci_dev_d3_sleep(dev);
4605
4606	return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
4607}
4608/**
4609 * pcie_wait_for_link - Wait until link is active or inactive
4610 * @pdev: Bridge device
4611 * @active: waiting for active or inactive?
4612 *
4613 * Use this to wait until the link becomes active or inactive.
4614 */
4615bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4616{
4617	int timeout = 1000;
4618	bool ret;
4619	u16 lnk_status;
4620
4621	/*
4622	 * Some controllers might not implement link active reporting. In this
4623	 * case, we wait for 1000 + 100 ms.
4624	 */
4625	if (!pdev->link_active_reporting) {
4626		msleep(1100);
4627		return true;
4628	}
4629
4630	/*
4631	 * Per PCIe r4.0 sec 6.6.1, a component must enter LTSSM Detect within
4632	 * 20ms, after which we should expect the link to become active if the
4633	 * reset was successful.  If so, software must wait a minimum of 100ms
4634	 * before sending configuration requests to devices downstream of this port.
4635	 *
4636	 * If the link fails to activate, either the device was physically
4637	 * removed or the link has permanently failed.
4638	 */
4639	if (active)
4640		msleep(20);
4641	for (;;) {
4642		pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4643		ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4644		if (ret == active)
4645			break;
4646		if (timeout <= 0)
4647			break;
4648		msleep(10);
4649		timeout -= 10;
4650	}
4651	if (active && ret)
4652		msleep(100);
4653	else if (ret != active)
4654		pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4655			active ? "set" : "cleared");
4656	return ret == active;
4657}
4658
4659void pci_reset_secondary_bus(struct pci_dev *dev)
4660{
4661	u16 ctrl;
4662
4663	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4664	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4665	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4666
4667	/*
4668	 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms.  Double
4669	 * this to 2ms to ensure that we meet the minimum requirement.
4670	 */
4671	msleep(2);
4672
4673	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4674	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
4675
4676	/*
4677	 * Trhfa for conventional PCI is 2^25 clock cycles.
4678	 * Assuming a minimum 33MHz clock (2^25 / 33MHz ~= 1.02s) this results
4679	 * in a 1s delay before we can consider subordinate devices to
4680	 * be re-initialized.  PCIe has some ways to shorten this,
4681	 * but we don't make use of them yet.
4682	 */
4683	ssleep(1);
4684}
4685
4686void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4687{
4688	pci_reset_secondary_bus(dev);
4689}
4690
4691/**
4692 * pci_bridge_secondary_bus_reset - Reset the secondary bus on a PCI bridge.
4693 * @dev: Bridge device
4694 *
4695 * Use the bridge control register to assert reset on the secondary bus.
4696 * Devices on the secondary bus are left in power-on state.
4697 */
4698int pci_bridge_secondary_bus_reset(struct pci_dev *dev)
4699{
4700	pcibios_reset_secondary_bus(dev);
4701
4702	return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
4703}
4704EXPORT_SYMBOL_GPL(pci_bridge_secondary_bus_reset);
4705
4706static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4707{
4708	struct pci_dev *pdev;
4709
4710	if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4711	    !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4712		return -ENOTTY;
4713
4714	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4715		if (pdev != dev)
4716			return -ENOTTY;
4717
4718	if (probe)
4719		return 0;
4720
4721	return pci_bridge_secondary_bus_reset(dev->bus->self);
4722}
4723
4724static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4725{
4726	int rc = -ENOTTY;
4727
4728	if (!hotplug || !try_module_get(hotplug->owner))
4729		return rc;
4730
4731	if (hotplug->ops->reset_slot)
4732		rc = hotplug->ops->reset_slot(hotplug, probe);
4733
4734	module_put(hotplug->owner);
4735
4736	return rc;
4737}
4738
4739static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4740{
4741	struct pci_dev *pdev;
4742
4743	if (dev->subordinate || !dev->slot ||
4744	    dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
4745		return -ENOTTY;
4746
4747	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4748		if (pdev != dev && pdev->slot == dev->slot)
4749			return -ENOTTY;
4750
4751	return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4752}
4753
4754static void pci_dev_lock(struct pci_dev *dev)
4755{
4756	pci_cfg_access_lock(dev);
4757	/* block PM suspend, driver probe, etc. */
4758	device_lock(&dev->dev);
4759}
4760
4761/* Return 1 on successful lock, 0 on contention */
4762static int pci_dev_trylock(struct pci_dev *dev)
4763{
4764	if (pci_cfg_access_trylock(dev)) {
4765		if (device_trylock(&dev->dev))
4766			return 1;
4767		pci_cfg_access_unlock(dev);
4768	}
4769
4770	return 0;
4771}
4772
4773static void pci_dev_unlock(struct pci_dev *dev)
4774{
4775	device_unlock(&dev->dev);
4776	pci_cfg_access_unlock(dev);
4777}
4778
4779static void pci_dev_save_and_disable(struct pci_dev *dev)
4780{
4781	const struct pci_error_handlers *err_handler =
4782			dev->driver ? dev->driver->err_handler : NULL;
4783
4784	/*
4785	 * dev->driver->err_handler->reset_prepare() is protected against
4786	 * races with ->remove() by the device lock, which must be held by
4787	 * the caller.
4788	 */
4789	if (err_handler && err_handler->reset_prepare)
4790		err_handler->reset_prepare(dev);
4791
4792	/*
4793	 * Wake-up device prior to save.  PM registers default to D0 after
4794	 * reset and a simple register restore doesn't reliably return
4795	 * to a non-D0 state anyway.
4796	 */
4797	pci_set_power_state(dev, PCI_D0);
4798
4799	pci_save_state(dev);
4800	/*
4801	 * Disable the device by clearing the Command register, except for
4802	 * INTx-disable which is set.  This not only disables MMIO and I/O port
4803	 * BARs, but also prevents the device from being Bus Master, preventing
4804	 * DMA from the device including MSI/MSI-X interrupts.  For PCI 2.3
4805	 * compliant devices, INTx-disable prevents legacy interrupts.
4806	 */
4807	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4808}
4809
4810static void pci_dev_restore(struct pci_dev *dev)
4811{
4812	const struct pci_error_handlers *err_handler =
4813			dev->driver ? dev->driver->err_handler : NULL;
4814
4815	pci_restore_state(dev);
4816
4817	/*
4818	 * dev->driver->err_handler->reset_done() is protected against
4819	 * races with ->remove() by the device lock, which must be held by
4820	 * the caller.
4821	 */
4822	if (err_handler && err_handler->reset_done)
4823		err_handler->reset_done(dev);
4824}
4825
4826/**
4827 * __pci_reset_function_locked - reset a PCI device function while holding
4828 * the @dev mutex lock.
4829 * @dev: PCI device to reset
4830 *
4831 * Some devices allow an individual function to be reset without affecting
4832 * other functions in the same device.  The PCI device must be responsive
4833 * to PCI config space in order to use this function.
4834 *
4835 * The device function is presumed to be unused and the caller is holding
4836 * the device mutex lock when this function is called.
4837 *
4838 * Resetting the device will make the contents of PCI configuration space
4839 * random, so any caller of this must be prepared to reinitialise the
4840 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4841 * etc.
4842 *
4843 * Returns 0 if the device function was successfully reset or negative if the
4844 * device doesn't support resetting a single function.
4845 */
4846int __pci_reset_function_locked(struct pci_dev *dev)
4847{
4848	int rc;
4849
4850	might_sleep();
4851
4852	/*
4853	 * A reset method returns -ENOTTY if it doesn't support this device
4854	 * and we should try the next method.
4855	 *
4856	 * If it returns 0 (success), we're finished.  If it returns any
4857	 * other error, we're also finished: this indicates that further
4858	 * reset mechanisms might be broken on the device.
4859	 */
4860	rc = pci_dev_specific_reset(dev, 0);
4861	if (rc != -ENOTTY)
4862		return rc;
4863	if (pcie_has_flr(dev)) {
4864		rc = pcie_flr(dev);
4865		if (rc != -ENOTTY)
4866			return rc;
4867	}
4868	rc = pci_af_flr(dev, 0);
4869	if (rc != -ENOTTY)
4870		return rc;
4871	rc = pci_pm_reset(dev, 0);
4872	if (rc != -ENOTTY)
4873		return rc;
4874	rc = pci_dev_reset_slot_function(dev, 0);
4875	if (rc != -ENOTTY)
4876		return rc;
4877	return pci_parent_bus_reset(dev, 0);
4878}
4879EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4880
4881/**
4882 * pci_probe_reset_function - check whether the device can be safely reset
4883 * @dev: PCI device to reset
4884 *
4885 * Some devices allow an individual function to be reset without affecting
4886 * other functions in the same device.  The PCI device must be responsive
4887 * to PCI config space in order to use this function.
4888 *
4889 * Returns 0 if the device function can be reset or negative if the
4890 * device doesn't support resetting a single function.
4891 */
4892int pci_probe_reset_function(struct pci_dev *dev)
4893{
4894	int rc;
4895
4896	might_sleep();
4897
4898	rc = pci_dev_specific_reset(dev, 1);
4899	if (rc != -ENOTTY)
4900		return rc;
4901	if (pcie_has_flr(dev))
4902		return 0;
4903	rc = pci_af_flr(dev, 1);
4904	if (rc != -ENOTTY)
4905		return rc;
4906	rc = pci_pm_reset(dev, 1);
4907	if (rc != -ENOTTY)
4908		return rc;
4909	rc = pci_dev_reset_slot_function(dev, 1);
4910	if (rc != -ENOTTY)
4911		return rc;
4912
4913	return pci_parent_bus_reset(dev, 1);
4914}
4915
4916/**
4917 * pci_reset_function - quiesce and reset a PCI device function
4918 * @dev: PCI device to reset
4919 *
4920 * Some devices allow an individual function to be reset without affecting
4921 * other functions in the same device.  The PCI device must be responsive
4922 * to PCI config space in order to use this function.
4923 *
4924 * This function does not just reset the PCI portion of a device, but
4925 * clears all the state associated with the device.  This function differs
4926 * from __pci_reset_function_locked() in that it saves and restores device state
4927 * over the reset and takes the PCI device lock.
4928 *
4929 * Returns 0 if the device function was successfully reset or negative if the
4930 * device doesn't support resetting a single function.
4931 */
4932int pci_reset_function(struct pci_dev *dev)
4933{
4934	int rc;
4935
4936	if (!dev->reset_fn)
4937		return -ENOTTY;
4938
4939	pci_dev_lock(dev);
4940	pci_dev_save_and_disable(dev);
4941
4942	rc = __pci_reset_function_locked(dev);
4943
4944	pci_dev_restore(dev);
4945	pci_dev_unlock(dev);
4946
4947	return rc;
4948}
4949EXPORT_SYMBOL_GPL(pci_reset_function);
4950
4951/**
4952 * pci_reset_function_locked - quiesce and reset a PCI device function
4953 * @dev: PCI device to reset
4954 *
4955 * Some devices allow an individual function to be reset without affecting
4956 * other functions in the same device.  The PCI device must be responsive
4957 * to PCI config space in order to use this function.
4958 *
4959 * This function does not just reset the PCI portion of a device, but
4960 * clears all the state associated with the device.  This function differs
4961 * from __pci_reset_function_locked() in that it saves and restores device state
4962 * over the reset.  It also differs from pci_reset_function() in that it
4963 * requires the PCI device lock to be held.
4964 *
4965 * Returns 0 if the device function was successfully reset or negative if the
4966 * device doesn't support resetting a single function.
4967 */
4968int pci_reset_function_locked(struct pci_dev *dev)
4969{
4970	int rc;
4971
4972	if (!dev->reset_fn)
4973		return -ENOTTY;
4974
4975	pci_dev_save_and_disable(dev);
4976
4977	rc = __pci_reset_function_locked(dev);
4978
4979	pci_dev_restore(dev);
4980
4981	return rc;
4982}
4983EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4984
4985/**
4986 * pci_try_reset_function - quiesce and reset a PCI device function
4987 * @dev: PCI device to reset
4988 *
4989 * Same as above, except return -EAGAIN if unable to lock device.
4990 */
4991int pci_try_reset_function(struct pci_dev *dev)
4992{
4993	int rc;
4994
4995	if (!dev->reset_fn)
4996		return -ENOTTY;
4997
4998	if (!pci_dev_trylock(dev))
4999		return -EAGAIN;
5000
5001	pci_dev_save_and_disable(dev);
5002	rc = __pci_reset_function_locked(dev);
5003	pci_dev_restore(dev);
5004	pci_dev_unlock(dev);
5005
5006	return rc;
5007}
5008EXPORT_SYMBOL_GPL(pci_try_reset_function);
5009
5010/* Do any devices on or below this bus prevent a bus reset? */
5011static bool pci_bus_resetable(struct pci_bus *bus)
5012{
5013	struct pci_dev *dev;
5014
5015
5016	if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5017		return false;
5018
5019	list_for_each_entry(dev, &bus->devices, bus_list) {
5020		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5021		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5022			return false;
5023	}
5024
5025	return true;
5026}
5027
5028/* Lock devices from the top of the tree down */
5029static void pci_bus_lock(struct pci_bus *bus)
5030{
5031	struct pci_dev *dev;
5032
5033	list_for_each_entry(dev, &bus->devices, bus_list) {
5034		pci_dev_lock(dev);
5035		if (dev->subordinate)
5036			pci_bus_lock(dev->subordinate);
5037	}
5038}
5039
5040/* Unlock devices from the bottom of the tree up */
5041static void pci_bus_unlock(struct pci_bus *bus)
5042{
5043	struct pci_dev *dev;
5044
5045	list_for_each_entry(dev, &bus->devices, bus_list) {
5046		if (dev->subordinate)
5047			pci_bus_unlock(dev->subordinate);
5048		pci_dev_unlock(dev);
5049	}
5050}
5051
5052/* Return 1 on successful lock, 0 on contention */
5053static int pci_bus_trylock(struct pci_bus *bus)
5054{
5055	struct pci_dev *dev;
5056
5057	list_for_each_entry(dev, &bus->devices, bus_list) {
5058		if (!pci_dev_trylock(dev))
5059			goto unlock;
5060		if (dev->subordinate) {
5061			if (!pci_bus_trylock(dev->subordinate)) {
5062				pci_dev_unlock(dev);
5063				goto unlock;
5064			}
5065		}
5066	}
5067	return 1;
5068
5069unlock:
5070	list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
5071		if (dev->subordinate)
5072			pci_bus_unlock(dev->subordinate);
5073		pci_dev_unlock(dev);
5074	}
5075	return 0;
5076}
5077
5078/* Do any devices on or below this slot prevent a bus reset? */
5079static bool pci_slot_resetable(struct pci_slot *slot)
5080{
5081	struct pci_dev *dev;
5082
5083	if (slot->bus->self &&
5084	    (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
5085		return false;
5086
5087	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5088		if (!dev->slot || dev->slot != slot)
5089			continue;
5090		if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
5091		    (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
5092			return false;
5093	}
5094
5095	return true;
5096}
5097
5098/* Lock devices from the top of the tree down */
5099static void pci_slot_lock(struct pci_slot *slot)
5100{
5101	struct pci_dev *dev;
5102
5103	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5104		if (!dev->slot || dev->slot != slot)
5105			continue;
5106		pci_dev_lock(dev);
5107		if (dev->subordinate)
5108			pci_bus_lock(dev->subordinate);
5109	}
5110}
5111
5112/* Unlock devices from the bottom of the tree up */
5113static void pci_slot_unlock(struct pci_slot *slot)
5114{
5115	struct pci_dev *dev;
5116
5117	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5118		if (!dev->slot || dev->slot != slot)
5119			continue;
5120		if (dev->subordinate)
5121			pci_bus_unlock(dev->subordinate);
5122		pci_dev_unlock(dev);
5123	}
5124}
5125
5126/* Return 1 on successful lock, 0 on contention */
5127static int pci_slot_trylock(struct pci_slot *slot)
5128{
5129	struct pci_dev *dev;
5130
5131	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5132		if (!dev->slot || dev->slot != slot)
5133			continue;
5134		if (!pci_dev_trylock(dev))
5135			goto unlock;
5136		if (dev->subordinate) {
5137			if (!pci_bus_trylock(dev->subordinate)) {
5138				pci_dev_unlock(dev);
5139				goto unlock;
5140			}
5141		}
5142	}
5143	return 1;
5144
5145unlock:
5146	list_for_each_entry_continue_reverse(dev,
5147					     &slot->bus->devices, bus_list) {
5148		if (!dev->slot || dev->slot != slot)
5149			continue;
5150		if (dev->subordinate)
5151			pci_bus_unlock(dev->subordinate);
5152		pci_dev_unlock(dev);
5153	}
5154	return 0;
5155}
5156
5157/*
5158 * Save and disable devices from the top of the tree down while holding
5159 * the @dev mutex lock for the entire tree.
5160 */
5161static void pci_bus_save_and_disable_locked(struct pci_bus *bus)
5162{
5163	struct pci_dev *dev;
5164
5165	list_for_each_entry(dev, &bus->devices, bus_list) {
5166		pci_dev_save_and_disable(dev);
5167		if (dev->subordinate)
5168			pci_bus_save_and_disable_locked(dev->subordinate);
5169	}
5170}
5171
5172/*
5173 * Restore devices from top of the tree down while holding @dev mutex lock
5174 * for the entire tree.  Parent bridges need to be restored before we can
5175 * get to subordinate devices.
5176 */
5177static void pci_bus_restore_locked(struct pci_bus *bus)
5178{
5179	struct pci_dev *dev;
5180
5181	list_for_each_entry(dev, &bus->devices, bus_list) {
5182		pci_dev_restore(dev);
5183		if (dev->subordinate)
5184			pci_bus_restore_locked(dev->subordinate);
5185	}
5186}
5187
5188/*
5189 * Save and disable devices from the top of the tree down while holding
5190 * the @dev mutex lock for the entire tree.
5191 */
5192static void pci_slot_save_and_disable_locked(struct pci_slot *slot)
5193{
5194	struct pci_dev *dev;
5195
5196	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5197		if (!dev->slot || dev->slot != slot)
5198			continue;
5199		pci_dev_save_and_disable(dev);
5200		if (dev->subordinate)
5201			pci_bus_save_and_disable_locked(dev->subordinate);
5202	}
5203}
5204
5205/*
5206 * Restore devices from top of the tree down while holding @dev mutex lock
5207 * for the entire tree.  Parent bridges need to be restored before we can
5208 * get to subordinate devices.
5209 */
5210static void pci_slot_restore_locked(struct pci_slot *slot)
5211{
5212	struct pci_dev *dev;
5213
5214	list_for_each_entry(dev, &slot->bus->devices, bus_list) {
5215		if (!dev->slot || dev->slot != slot)
5216			continue;
5217		pci_dev_restore(dev);
5218		if (dev->subordinate)
5219			pci_bus_restore_locked(dev->subordinate);
5220	}
5221}
5222
5223static int pci_slot_reset(struct pci_slot *slot, int probe)
5224{
5225	int rc;
5226
5227	if (!slot || !pci_slot_resetable(slot))
5228		return -ENOTTY;
5229
5230	if (!probe)
5231		pci_slot_lock(slot);
5232
5233	might_sleep();
5234
5235	rc = pci_reset_hotplug_slot(slot->hotplug, probe);
5236
5237	if (!probe)
5238		pci_slot_unlock(slot);
5239
5240	return rc;
5241}
5242
5243/**
5244 * pci_probe_reset_slot - probe whether a PCI slot can be reset
5245 * @slot: PCI slot to probe
5246 *
5247 * Return 0 if slot can be reset, negative if a slot reset is not supported.
5248 */
5249int pci_probe_reset_slot(struct pci_slot *slot)
5250{
5251	return pci_slot_reset(slot, 1);
5252}
5253EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
5254
5255/**
5256 * __pci_reset_slot - Try to reset a PCI slot
5257 * @slot: PCI slot to reset
5258 *
5259 * A PCI bus may host multiple slots, each slot may support a reset mechanism
5260 * independent of other slots.  For instance, some slots may support slot power
5261 * control.  In the case of a 1:1 bus to slot architecture, this function may
5262 * wrap the bus reset to avoid spurious slot related events such as hotplug.
5263 * Generally a slot reset should be attempted before a bus reset.  All of the
5264 * functions of the slot and any subordinate buses behind the slot are reset
5265 * through this function.  PCI config space of all devices in the slot and
5266 * behind the slot is saved before and restored after reset.
5267 *
5268 * Same as above except return -EAGAIN if the slot cannot be locked
5269 */
5270static int __pci_reset_slot(struct pci_slot *slot)
5271{
5272	int rc;
5273
5274	rc = pci_slot_reset(slot, 1);
5275	if (rc)
5276		return rc;
5277
5278	if (pci_slot_trylock(slot)) {
5279		pci_slot_save_and_disable_locked(slot);
5280		might_sleep();
5281		rc = pci_reset_hotplug_slot(slot->hotplug, 0);
5282		pci_slot_restore_locked(slot);
5283		pci_slot_unlock(slot);
5284	} else
5285		rc = -EAGAIN;
5286
5287	return rc;
5288}
5289
5290static int pci_bus_reset(struct pci_bus *bus, int probe)
5291{
5292	int ret;
5293
5294	if (!bus->self || !pci_bus_resetable(bus))
5295		return -ENOTTY;
5296
5297	if (probe)
5298		return 0;
5299
5300	pci_bus_lock(bus);
5301
5302	might_sleep();
5303
5304	ret = pci_bridge_secondary_bus_reset(bus->self);
5305
5306	pci_bus_unlock(bus);
5307
5308	return ret;
5309}
5310
5311/**
5312 * pci_bus_error_reset - reset the bridge's subordinate bus
5313 * @bridge: The parent device that connects to the bus to reset
5314 *
5315 * This function will first try to reset the slots on this bus if the method is
5316 * available. If slot reset fails or is not available, this will fall back to a
5317 * secondary bus reset.
5318 */
5319int pci_bus_error_reset(struct pci_dev *bridge)
5320{
5321	struct pci_bus *bus = bridge->subordinate;
5322	struct pci_slot *slot;
5323
5324	if (!bus)
5325		return -ENOTTY;
5326
5327	mutex_lock(&pci_slot_mutex);
5328	if (list_empty(&bus->slots))
5329		goto bus_reset;
5330
5331	list_for_each_entry(slot, &bus->slots, list)
5332		if (pci_probe_reset_slot(slot))
5333			goto bus_reset;
5334
5335	list_for_each_entry(slot, &bus->slots, list)
5336		if (pci_slot_reset(slot, 0))
5337			goto bus_reset;
5338
5339	mutex_unlock(&pci_slot_mutex);
5340	return 0;
5341bus_reset:
5342	mutex_unlock(&pci_slot_mutex);
5343	return pci_bus_reset(bridge->subordinate, 0);
5344}
5345
5346/**
5347 * pci_probe_reset_bus - probe whether a PCI bus can be reset
5348 * @bus: PCI bus to probe
5349 *
5350 * Return 0 if bus can be reset, negative if a bus reset is not supported.
5351 */
5352int pci_probe_reset_bus(struct pci_bus *bus)
5353{
5354	return pci_bus_reset(bus, 1);
5355}
5356EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
5357
5358/**
5359 * __pci_reset_bus - Try to reset a PCI bus
5360 * @bus: top level PCI bus to reset
5361 *
5362 * Same as above except return -EAGAIN if the bus cannot be locked
5363 */
5364static int __pci_reset_bus(struct pci_bus *bus)
5365{
5366	int rc;
5367
5368	rc = pci_bus_reset(bus, 1);
5369	if (rc)
5370		return rc;
5371
5372	if (pci_bus_trylock(bus)) {
5373		pci_bus_save_and_disable_locked(bus);
5374		might_sleep();
5375		rc = pci_bridge_secondary_bus_reset(bus->self);
5376		pci_bus_restore_locked(bus);
5377		pci_bus_unlock(bus);
5378	} else
5379		rc = -EAGAIN;
5380
5381	return rc;
5382}
5383
5384/**
5385 * pci_reset_bus - Try to reset a PCI bus
5386 * @pdev: top level PCI device to reset via slot/bus
5387 *
5388 * Same as above except return -EAGAIN if the bus cannot be locked
5389 */
5390int pci_reset_bus(struct pci_dev *pdev)
5391{
5392	return (!pci_probe_reset_slot(pdev->slot)) ?
5393	    __pci_reset_slot(pdev->slot) : __pci_reset_bus(pdev->bus);
5394}
5395EXPORT_SYMBOL_GPL(pci_reset_bus);
5396
5397/**
5398 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
5399 * @dev: PCI device to query
5400 *
5401 * Returns mmrbc: maximum designed memory read count in bytes or
5402 * appropriate error value.
5403 */
5404int pcix_get_max_mmrbc(struct pci_dev *dev)
5405{
5406	int cap;
5407	u32 stat;
5408
5409	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5410	if (!cap)
5411		return -EINVAL;
5412
5413	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5414		return -EINVAL;
5415
5416	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
5417}
5418EXPORT_SYMBOL(pcix_get_max_mmrbc);
5419
5420/**
5421 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
5422 * @dev: PCI device to query
5423 *
5424 * Returns mmrbc: maximum memory read count in bytes or appropriate error
5425 * value.
5426 */
5427int pcix_get_mmrbc(struct pci_dev *dev)
5428{
5429	int cap;
5430	u16 cmd;
5431
5432	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5433	if (!cap)
5434		return -EINVAL;
5435
5436	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5437		return -EINVAL;
5438
5439	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
5440}
5441EXPORT_SYMBOL(pcix_get_mmrbc);
5442
5443/**
5444 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
5445 * @dev: PCI device to query
5446 * @mmrbc: maximum memory read count in bytes
5447 *    valid values are 512, 1024, 2048, 4096
5448 *
5449 * If possible sets maximum memory read byte count, some bridges have errata
5450 * that prevent this.
5451 */
5452int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
5453{
5454	int cap;
5455	u32 stat, v, o;
5456	u16 cmd;
5457
5458	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
5459		return -EINVAL;
5460
5461	v = ffs(mmrbc) - 10;
5462
5463	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5464	if (!cap)
5465		return -EINVAL;
5466
5467	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5468		return -EINVAL;
5469
5470	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5471		return -E2BIG;
5472
5473	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5474		return -EINVAL;
5475
5476	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5477	if (o != v) {
5478		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
5479			return -EIO;
5480
5481		cmd &= ~PCI_X_CMD_MAX_READ;
5482		cmd |= v << 2;
5483		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5484			return -EIO;
5485	}
5486	return 0;
5487}
5488EXPORT_SYMBOL(pcix_set_mmrbc);
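
/*
 * Encoding check (illustrative): for mmrbc = 2048, ffs(2048) = 12 and
 * v = 12 - 10 = 2, which matches the 512 << 2 = 2048 decode used by
 * pcix_get_mmrbc() above.
 */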
5489
5490/**
5491 * pcie_get_readrq - get PCI Express read request size
5492 * @dev: PCI device to query
5493 *
5494 * Returns maximum memory read request size in bytes.
5495 */
5496int pcie_get_readrq(struct pci_dev *dev)
5497{
5498	u16 ctl;
5499
5500	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5501
5502	return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5503}
5504EXPORT_SYMBOL(pcie_get_readrq);
5505
5506/**
5507 * pcie_set_readrq - set PCI Express maximum memory read request
5508 * @dev: PCI device to query
5509 * @rq: maximum memory read count in bytes
5510 *    valid values are 128, 256, 512, 1024, 2048, 4096
5511 *
5512 * If possible sets maximum memory read request in bytes
5513 */
5514int pcie_set_readrq(struct pci_dev *dev, int rq)
5515{
5516	u16 v;
5517
5518	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
5519		return -EINVAL;
5520
5521	/*
5522	 * If using the "performance" PCIe config, we clamp the read rq
5523	 * size to the max packet size to keep the host bridge from
5524	 * generating requests larger than we can cope with.
5525	 */
5526	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5527		int mps = pcie_get_mps(dev);
5528
5529		if (mps < rq)
5530			rq = mps;
5531	}
5532
5533	v = (ffs(rq) - 8) << 12;
5534
5535	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5536						  PCI_EXP_DEVCTL_READRQ, v);
5537}
5538EXPORT_SYMBOL(pcie_set_readrq);
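
/*
 * Encoding check (illustrative): for rq = 512, ffs(512) = 10, so
 * v = (10 - 8) << 12 = 0x2000, i.e. 2 in the DEVCTL ReadRq field, which
 * pcie_get_readrq() decodes back as 128 << 2 = 512.
 */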
5539
5540/**
5541 * pcie_get_mps - get PCI Express maximum payload size
5542 * @dev: PCI device to query
5543 *
5544 * Returns maximum payload size in bytes.
5545 */
5546int pcie_get_mps(struct pci_dev *dev)
5547{
5548	u16 ctl;
5549
5550	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
5551
5552	return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5553}
5554EXPORT_SYMBOL(pcie_get_mps);
5555
5556/**
5557 * pcie_set_mps - set PCI Express maximum payload size
5558 * @dev: PCI device to query
5559 * @mps: maximum payload size in bytes
5560 *    valid values are 128, 256, 512, 1024, 2048, 4096
5561 *
5562 * If possible sets maximum payload size
5563 */
5564int pcie_set_mps(struct pci_dev *dev, int mps)
5565{
5566	u16 v;
5567
5568	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
5569		return -EINVAL;
5570
5571	v = ffs(mps) - 8;
5572	if (v > dev->pcie_mpss)
5573		return -EINVAL;
5574	v <<= 5;
5575
5576	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5577						  PCI_EXP_DEVCTL_PAYLOAD, v);
5578}
5579EXPORT_SYMBOL(pcie_set_mps);
5580
5581/**
5582 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5583 *			      device and its bandwidth limitation
5584 * @dev: PCI device to query
5585 * @limiting_dev: storage for device causing the bandwidth limitation
5586 * @speed: storage for speed of limiting device
5587 * @width: storage for width of limiting device
5588 *
5589 * Walk up the PCI device chain and find the point where the minimum
5590 * bandwidth is available.  Return the bandwidth available there and (if
5591 * limiting_dev, speed, and width pointers are supplied) information about
5592 * that point.  The bandwidth returned is in Mb/s, i.e., megabits/second of
5593 * raw bandwidth.
5594 */
5595u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5596			     enum pci_bus_speed *speed,
5597			     enum pcie_link_width *width)
5598{
5599	u16 lnksta;
5600	enum pci_bus_speed next_speed;
5601	enum pcie_link_width next_width;
5602	u32 bw, next_bw;
5603
5604	if (speed)
5605		*speed = PCI_SPEED_UNKNOWN;
5606	if (width)
5607		*width = PCIE_LNK_WIDTH_UNKNOWN;
5608
5609	bw = 0;
5610
5611	while (dev) {
5612		pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5613
5614		next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5615		next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5616			PCI_EXP_LNKSTA_NLW_SHIFT;
5617
5618		next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5619
5620		/* Check if current device limits the total bandwidth */
5621		if (!bw || next_bw <= bw) {
5622			bw = next_bw;
5623
5624			if (limiting_dev)
5625				*limiting_dev = dev;
5626			if (speed)
5627				*speed = next_speed;
5628			if (width)
5629				*width = next_width;
5630		}
5631
5632		dev = pci_upstream_bridge(dev);
5633	}
5634
5635	return bw;
5636}
5637EXPORT_SYMBOL(pcie_bandwidth_available);
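
/*
 * Illustrative sketch, not part of this file: a caller interested only
 * in the raw number can pass NULL for the optional outputs ("pdev" is a
 * hypothetical device):
 *
 *	u32 bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
 *
 * bw is in Mb/s; divide by 1000 for the Gb/s figure printed by
 * __pcie_print_link_status() below.
 */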
5638
5639/**
5640 * pcie_get_speed_cap - query for the PCI device's link speed capability
5641 * @dev: PCI device to query
5642 *
5643 * Query the PCI device speed capability.  Return the maximum link speed
5644 * supported by the device.
5645 */
5646enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5647{
5648	u32 lnkcap2, lnkcap;
5649
5650	/*
5651	 * Link Capabilities 2 was added in PCIe r3.0, sec 7.8.18.  The
5652	 * implementation note there recommends using the Supported Link
5653	 * Speeds Vector in Link Capabilities 2 when supported.
5654	 *
5655	 * Without Link Capabilities 2, i.e., prior to PCIe r3.0, software
5656	 * should use the Supported Link Speeds field in Link Capabilities,
5657	 * where only 2.5 GT/s and 5.0 GT/s speeds were defined.
5658	 */
5659	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5660	if (lnkcap2) { /* PCIe r3.0-compliant */
5661		if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
5662			return PCIE_SPEED_32_0GT;
5663		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
5664			return PCIE_SPEED_16_0GT;
5665		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5666			return PCIE_SPEED_8_0GT;
5667		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5668			return PCIE_SPEED_5_0GT;
5669		else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5670			return PCIE_SPEED_2_5GT;
5671		return PCI_SPEED_UNKNOWN;
5672	}
5673
5674	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5675	if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_5_0GB)
5676		return PCIE_SPEED_5_0GT;
5677	else if ((lnkcap & PCI_EXP_LNKCAP_SLS) == PCI_EXP_LNKCAP_SLS_2_5GB)
5678		return PCIE_SPEED_2_5GT;
5679
5680	return PCI_SPEED_UNKNOWN;
5681}
5682EXPORT_SYMBOL(pcie_get_speed_cap);
5683
5684/**
5685 * pcie_get_width_cap - query for the PCI device's link width capability
5686 * @dev: PCI device to query
5687 *
5688 * Query the PCI device width capability.  Return the maximum link width
5689 * supported by the device.
5690 */
5691enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5692{
5693	u32 lnkcap;
5694
5695	pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5696	if (lnkcap)
5697		return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5698
5699	return PCIE_LNK_WIDTH_UNKNOWN;
5700}
5701EXPORT_SYMBOL(pcie_get_width_cap);
5702
5703/**
5704 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5705 * @dev: PCI device
5706 * @speed: storage for link speed
5707 * @width: storage for link width
5708 *
5709 * Calculate a PCI device's link bandwidth by querying for its link speed
5710 * and width, multiplying them, and applying encoding overhead.  The result
5711 * is in Mb/s, i.e., megabits/second of raw bandwidth.
5712 */
5713u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5714			   enum pcie_link_width *width)
5715{
5716	*speed = pcie_get_speed_cap(dev);
5717	*width = pcie_get_width_cap(dev);
5718
5719	if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5720		return 0;
5721
5722	return *width * PCIE_SPEED2MBS_ENC(*speed);
5723}
5724
5725/**
5726 * __pcie_print_link_status - Report the PCI device's link speed and width
5727 * @dev: PCI device to query
5728 * @verbose: Print info even when enough bandwidth is available
5729 *
5730 * If the available bandwidth at the device is less than the device is
5731 * capable of, report the device's maximum possible bandwidth and the
5732 * upstream link that limits its performance.  If @verbose, always print
5733 * the available bandwidth, even if the device isn't constrained.
5734 */
5735void __pcie_print_link_status(struct pci_dev *dev, bool verbose)
5736{
5737	enum pcie_link_width width, width_cap;
5738	enum pci_bus_speed speed, speed_cap;
5739	struct pci_dev *limiting_dev = NULL;
5740	u32 bw_avail, bw_cap;
5741
5742	bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5743	bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5744
5745	if (bw_avail >= bw_cap && verbose)
5746		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
5747			 bw_cap / 1000, bw_cap % 1000,
5748			 PCIE_SPEED2STR(speed_cap), width_cap);
5749	else if (bw_avail < bw_cap)
5750		pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
5751			 bw_avail / 1000, bw_avail % 1000,
5752			 PCIE_SPEED2STR(speed), width,
5753			 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5754			 bw_cap / 1000, bw_cap % 1000,
5755			 PCIE_SPEED2STR(speed_cap), width_cap);
5756}
5757
5758/**
5759 * pcie_print_link_status - Report the PCI device's link speed and width
5760 * @dev: PCI device to query
5761 *
5762 * Report the available bandwidth at the device.
5763 */
5764void pcie_print_link_status(struct pci_dev *dev)
5765{
5766	__pcie_print_link_status(dev, true);
5767}
5768EXPORT_SYMBOL(pcie_print_link_status);
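
/*
 * Illustrative sketch, not part of this file: bandwidth-hungry drivers
 * (e.g. high-speed NICs) commonly call this once from their probe path
 * so that a constrained link shows up in the kernel log:
 *
 *	pcie_print_link_status(pdev);
 */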
5769
5770/**
5771 * pci_select_bars - Make BAR mask from the type of resource
5772 * @dev: the PCI device for which BAR mask is made
5773 * @flags: resource type mask to be selected
5774 *
5775 * This helper routine makes a BAR mask from the given resource type.
5776 */
5777int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5778{
5779	int i, bars = 0;
5780	for (i = 0; i < PCI_NUM_RESOURCES; i++)
5781		if (pci_resource_flags(dev, i) & flags)
5782			bars |= (1 << i);
5783	return bars;
5784}
5785EXPORT_SYMBOL(pci_select_bars);
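
/*
 * Illustrative sketch, not part of this file: the mask pairs naturally
 * with pci_request_selected_regions(), e.g. to claim only the MMIO BARs
 * ("my_drv" is a hypothetical driver name):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int err = pci_request_selected_regions(pdev, bars, "my_drv");
 *
 *	if (err)
 *		return err;
 */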
5786
5787/* Some architectures require additional programming to enable VGA */
5788static arch_set_vga_state_t arch_set_vga_state;
5789
5790void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5791{
5792	arch_set_vga_state = func;	/* NULL disables */
5793}
5794
5795static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
5796				  unsigned int command_bits, u32 flags)
5797{
5798	if (arch_set_vga_state)
5799		return arch_set_vga_state(dev, decode, command_bits,
5800						flags);
5801	return 0;
5802}
5803
5804/**
5805 * pci_set_vga_state - set VGA decode state on device and parents if requested
5806 * @dev: the PCI device
5807 * @decode: true = enable decoding, false = disable decoding
5808 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
5809 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or PCI_VGA_STATE_CHANGE_BRIDGE
5810 * (the latter propagates the change to ancestor bridges)
5811 */
5812int pci_set_vga_state(struct pci_dev *dev, bool decode,
5813		      unsigned int command_bits, u32 flags)
5814{
5815	struct pci_bus *bus;
5816	struct pci_dev *bridge;
5817	u16 cmd;
5818	int rc;
5819
5820	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
5821
5822	/* ARCH specific VGA enables */
5823	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
5824	if (rc)
5825		return rc;
5826
5827	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5828		pci_read_config_word(dev, PCI_COMMAND, &cmd);
5829		if (decode)
5830			cmd |= command_bits;
5831		else
5832			cmd &= ~command_bits;
5833		pci_write_config_word(dev, PCI_COMMAND, cmd);
5834	}
5835
5836	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
5837		return 0;
5838
5839	bus = dev->bus;
5840	while (bus) {
5841		bridge = bus->self;
5842		if (bridge) {
5843			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5844					     &cmd);
5845			if (decode)
5846				cmd |= PCI_BRIDGE_CTL_VGA;
5847			else
5848				cmd &= ~PCI_BRIDGE_CTL_VGA;
5849			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5850					      cmd);
5851		}
5852		bus = bus->parent;
5853	}
5854	return 0;
5855}
5856
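/*
 * Illustrative sketch, not part of this file: this is roughly how the
 * VGA arbiter (vgaarb) turns legacy VGA decoding off for a device and
 * the bridges above it ("pdev" is hypothetical):
 *
 *	pci_set_vga_state(pdev, false,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */
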
5857/**
5858 * pci_add_dma_alias - Add a DMA devfn alias for a device
5859 * @dev: the PCI device for which alias is added
5860 * @devfn: alias slot and function
5861 *
5862 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask
5863 * which is used to program permissible bus-devfn source addresses for DMA
5864 * requests in an IOMMU.  These aliases factor into IOMMU group creation
5865 * and are useful for devices generating DMA requests beyond or different
5866 * from their logical bus-devfn.  Examples include device quirks where the
5867 * device simply uses the wrong devfn, as well as non-transparent bridges
5868 * where the alias may be a proxy for devices in another domain.
5869 *
5870 * IOMMU group creation is performed during device discovery or addition,
5871 * prior to any potential DMA mapping and therefore prior to driver probing
5872 * (especially for userspace assigned devices where IOMMU group definition
5873 * cannot be left as a userspace activity).  DMA aliases should therefore
5874 * be configured via quirks, such as the PCI fixup header quirk.
5875 */
5876void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5877{
5878	if (!dev->dma_alias_mask)
5879		dev->dma_alias_mask = bitmap_zalloc(U8_MAX + 1, GFP_KERNEL);
5880	if (!dev->dma_alias_mask) {
5881		pci_warn(dev, "Unable to allocate DMA alias mask\n");
5882		return;
5883	}
5884
5885	set_bit(devfn, dev->dma_alias_mask);
5886	pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
5887		 PCI_SLOT(devfn), PCI_FUNC(devfn));
5888}
5889
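/*
 * Illustrative sketch, not part of this file: as noted above, aliases
 * are normally added from a header fixup quirk so they exist before
 * IOMMU groups are built (the IDs below are made up):
 *
 *	static void quirk_my_dma_alias(struct pci_dev *dev)
 *	{
 *		pci_add_dma_alias(dev, PCI_DEVFN(0, 1));
 *	}
 *	DECLARE_PCI_FIXUP_HEADER(0x1234, 0xabcd, quirk_my_dma_alias);
 */
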
5890bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5891{
5892	return (dev1->dma_alias_mask &&
5893		test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5894	       (dev2->dma_alias_mask &&
5895		test_bit(dev1->devfn, dev2->dma_alias_mask));
5896}
5897
5898bool pci_device_is_present(struct pci_dev *pdev)
5899{
5900	u32 v;
5901
5902	if (pci_dev_is_disconnected(pdev))
5903		return false;
5904	return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5905}
5906EXPORT_SYMBOL_GPL(pci_device_is_present);
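
/*
 * Illustrative sketch, not part of this file: teardown paths can use
 * this to skip register access once the device is gone, e.g. after a
 * surprise removal ("bar" and SOME_CTRL_REG are made-up names):
 *
 *	if (pci_device_is_present(pdev))
 *		writel(0, bar + SOME_CTRL_REG);
 */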
5907
5908void pci_ignore_hotplug(struct pci_dev *dev)
5909{
5910	struct pci_dev *bridge = dev->bus->self;
5911
5912	dev->ignore_hotplug = 1;
5913	/* Propagate the "ignore hotplug" setting to the parent bridge. */
5914	if (bridge)
5915		bridge->ignore_hotplug = 1;
5916}
5917EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
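
/*
 * Illustrative sketch, not part of this file: GPU drivers such as
 * nouveau, which power the device off at runtime (something the port
 * above can mistake for a hot unplug), call this from their
 * runtime-suspend path before cutting power:
 *
 *	pci_ignore_hotplug(pdev);
 */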
5918
5919resource_size_t __weak pcibios_default_alignment(void)
5920{
5921	return 0;
5922}
5923
5924/*
5925 * Arches that don't want to expose struct resource to userland as-is in
5926 * sysfs and /proc can implement their own pci_resource_to_user().
5927 */
5928void __weak pci_resource_to_user(const struct pci_dev *dev, int bar,
5929				 const struct resource *rsrc,
5930				 resource_size_t *start, resource_size_t *end)
5931{
5932	*start = rsrc->start;
5933	*end = rsrc->end;
5934}
5935
5936static char *resource_alignment_param;
5937static DEFINE_SPINLOCK(resource_alignment_lock);
5938
5939/**
5940 * pci_specified_resource_alignment - get resource alignment specified by user.
5941 * @dev: the PCI device to check
5942 * @resize: whether or not to change resources' size when reassigning alignment
5943 *
5944 * RETURNS: Resource alignment if it is specified.
5945 *          Zero if it is not specified.
5946 */
5947static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5948							bool *resize)
5949{
5950	int align_order, count;
5951	resource_size_t align = pcibios_default_alignment();
5952	const char *p;
5953	int ret;
5954
5955	spin_lock(&resource_alignment_lock);
5956	p = resource_alignment_param;
5957	if (!p || !*p)
5958		goto out;
5959	if (pci_has_flag(PCI_PROBE_ONLY)) {
5960		align = 0;
5961		pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5962		goto out;
5963	}
5964
5965	while (*p) {
5966		count = 0;
5967		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5968							p[count] == '@') {
5969			p += count + 1;
5970		} else {
5971			align_order = -1;
5972		}
5973
5974		ret = pci_dev_str_match(dev, p, &p);
5975		if (ret == 1) {
5976			*resize = true;
5977			if (align_order == -1)
5978				align = PAGE_SIZE;
5979			else
5980				align = 1ULL << align_order;
5981			break;
5982		} else if (ret < 0) {
5983			pr_err("PCI: Can't parse resource_alignment parameter: %s\n",
5984			       p);
5985			break;
5986		}
5987
5988		if (*p != ';' && *p != ',') {
5989			/* End of param or invalid format */
5990			break;
5991		}
5992		p++;
5993	}
5994out:
5995	spin_unlock(&resource_alignment_lock);
5996	return align;
5997}
5998
5999static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
6000					   resource_size_t align, bool resize)
6001{
6002	struct resource *r = &dev->resource[bar];
6003	resource_size_t size;
6004
6005	if (!(r->flags & IORESOURCE_MEM))
6006		return;
6007
6008	if (r->flags & IORESOURCE_PCI_FIXED) {
6009		pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
6010			 bar, r, (unsigned long long)align);
6011		return;
6012	}
6013
6014	size = resource_size(r);
6015	if (size >= align)
6016		return;
6017
6018	/*
6019	 * Increase the alignment of the resource.  There are two ways we
6020	 * can do this:
6021	 *
6022	 * 1) Increase the size of the resource.  BARs are aligned on their
6023	 *    size, so when we reallocate space for this resource, we'll
6024	 *    allocate it with the larger alignment.  This also prevents
6025	 *    assignment of any other BARs inside the alignment region, so
6026	 *    if we're requesting page alignment, this means no other BARs
6027	 *    will share the page.
6028	 *
6029	 *    The disadvantage is that this makes the resource larger than
6030	 *    the hardware BAR, which may break drivers that compute things
6031	 *    based on the resource size, e.g., to find registers at a
6032	 *    fixed offset before the end of the BAR.
6033	 *
6034	 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
6035	 *    set r->start to the desired alignment.  By itself this
6036	 *    doesn't prevent other BARs being put inside the alignment
6037	 *    region, but if we realign *every* resource of every device in
6038	 *    the system, none of them will share an alignment region.
6039	 *
6040	 * When the user has requested alignment for only some devices via
6041	 * the "pci=resource_alignment" argument, "resize" is true and we
6042	 * use the first method.  Otherwise we assume we're aligning all
6043	 * devices and we use the second.
6044	 */
6045
6046	pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
6047		 bar, r, (unsigned long long)align);
6048
6049	if (resize) {
6050		r->start = 0;
6051		r->end = align - 1;
6052	} else {
6053		r->flags &= ~IORESOURCE_SIZEALIGN;
6054		r->flags |= IORESOURCE_STARTALIGN;
6055		r->start = align;
6056		r->end = r->start + size - 1;
6057	}
6058	r->flags |= IORESOURCE_UNSET;
6059}
6060
6061/*
6062 * This function disables memory decoding and releases memory resources
6063 * of the device specified by the kernel's boot parameter 'pci=resource_alignment='.
6064 * It also rounds up the size to the specified alignment.
6065 * Later on, the kernel will assign page-aligned memory resources back
6066 * to the device.
6067 */
6068void pci_reassigndev_resource_alignment(struct pci_dev *dev)
6069{
6070	int i;
6071	struct resource *r;
6072	resource_size_t align;
6073	u16 command;
6074	bool resize = false;
6075
6076	/*
6077	 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
6078	 * 3.4.1.11.  Their resources are allocated from the space
6079	 * described by the VF BARx register in the PF's SR-IOV capability.
6080	 * We can't influence their alignment here.
6081	 */
6082	if (dev->is_virtfn)
6083		return;
6084
6085	/* Check if the specified device is a target for resource reassignment */
6086	align = pci_specified_resource_alignment(dev, &resize);
6087	if (!align)
6088		return;
6089
6090	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
6091	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
6092		pci_warn(dev, "Can't reassign resources to host bridge\n");
6093		return;
6094	}
6095
6096	pci_read_config_word(dev, PCI_COMMAND, &command);
6097	command &= ~PCI_COMMAND_MEMORY;
6098	pci_write_config_word(dev, PCI_COMMAND, command);
6099
6100	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
6101		pci_request_resource_alignment(dev, i, align, resize);
6102
6103	/*
6104	 * Need to disable the bridge's resource windows
6105	 * so that the kernel can reassign new resource
6106	 * windows later on.
6107	 */
6108	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
6109		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
6110			r = &dev->resource[i];
6111			if (!(r->flags & IORESOURCE_MEM))
6112				continue;
6113			r->flags |= IORESOURCE_UNSET;
6114			r->end = resource_size(r) - 1;
6115			r->start = 0;
6116		}
6117		pci_disable_bridge_window(dev);
6118	}
6119}
6120
6121static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
6122{
6123	size_t count = 0;
6124
6125	spin_lock(&resource_alignment_lock);
6126	if (resource_alignment_param)
6127		count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
6128	spin_unlock(&resource_alignment_lock);
6129
6130	/*
6131	 * When set by the command line, resource_alignment_param will not
6132	 * have a trailing line feed, which is ugly. So conditionally add
6133	 * it here.
6134	 */
6135	if (count && buf[count - 1] != '\n' && count < PAGE_SIZE - 1) {
6136		buf[count++] = '\n';
6137		buf[count] = 0;
6138	}
6139
6140	return count;
6141}
6142
6143static ssize_t resource_alignment_store(struct bus_type *bus,
6144					const char *buf, size_t count)
6145{
6146	char *param = kstrndup(buf, count, GFP_KERNEL);
6147
6148	if (!param)
6149		return -ENOMEM;
6150
6151	spin_lock(&resource_alignment_lock);
6152	kfree(resource_alignment_param);
6153	resource_alignment_param = param;
6154	spin_unlock(&resource_alignment_lock);
6155	return count;
6156}
6157
6158static BUS_ATTR_RW(resource_alignment);
6159
6160static int __init pci_resource_alignment_sysfs_init(void)
6161{
6162	return bus_create_file(&pci_bus_type,
6163					&bus_attr_resource_alignment);
6164}
6165late_initcall(pci_resource_alignment_sysfs_init);
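
/*
 * Illustrative usage of the attribute created above (the path follows
 * from bus_create_file() on pci_bus_type; the leading number is an
 * alignment order, so 12 requests 4 KiB alignment):
 *
 *	# echo "12@0000:01:00.0" > /sys/bus/pci/resource_alignment
 */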
6166
6167static void pci_no_domains(void)
6168{
6169#ifdef CONFIG_PCI_DOMAINS
6170	pci_domains_supported = 0;
6171#endif
6172}
6173
6174#ifdef CONFIG_PCI_DOMAINS_GENERIC
6175static atomic_t __domain_nr = ATOMIC_INIT(-1);
6176
6177static int pci_get_new_domain_nr(void)
6178{
6179	return atomic_inc_return(&__domain_nr);
6180}
6181
6182static int of_pci_bus_find_domain_nr(struct device *parent)
6183{
6184	static int use_dt_domains = -1;
6185	int domain = -1;
6186
6187	if (parent)
6188		domain = of_get_pci_domain_nr(parent->of_node);
6189
6190	/*
6191	 * Check DT domain and use_dt_domains values.
6192	 *
6193	 * If DT domain property is valid (domain >= 0) and
6194	 * use_dt_domains != 0, the DT assignment is valid since this means
6195	 * we have not previously allocated a domain number by using
6196	 * pci_get_new_domain_nr(); we should also update use_dt_domains to
6197	 * 1, to indicate that we have just assigned a domain number from
6198	 * DT.
6199	 *
6200	 * If the DT domain property value is not valid (i.e., domain < 0)
6201	 * and we have not previously assigned a domain number from DT
6202	 * (use_dt_domains != 1), we should assign a domain number by
6203	 * calling:
6204	 *
6205	 * pci_get_new_domain_nr()
6206	 *
6207	 * and update the use_dt_domains value to keep track of the method
6208	 * we are using to assign domain numbers (use_dt_domains = 0).
6209	 *
6210	 * All other combinations imply we have a platform that is trying
6211	 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
6212	 * which is a recipe for domain mishandling and it is prevented by
6213	 * invalidating the domain value (domain = -1) and printing a
6214	 * corresponding error.
6215	 */
6216	if (domain >= 0 && use_dt_domains) {
6217		use_dt_domains = 1;
6218	} else if (domain < 0 && use_dt_domains != 1) {
6219		use_dt_domains = 0;
6220		domain = pci_get_new_domain_nr();
6221	} else {
6222		if (parent)
6223			pr_err("Node %pOF has ", parent->of_node);
6224		pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
6225		domain = -1;
6226	}
6227
6228	return domain;
6229}
6230
6231int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
6232{
6233	return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
6234			       acpi_pci_bus_find_domain_nr(bus);
6235}
6236#endif
6237
6238/**
6239 * pci_ext_cfg_avail - can we access extended PCI config space?
6240 *
6241 * Returns 1 if we can access PCI extended config space (offsets
6242 * greater than 0xff). This is the default implementation. Architecture
6243 * implementations can override this.
6244 */
6245int __weak pci_ext_cfg_avail(void)
6246{
6247	return 1;
6248}
6249
6250void __weak pci_fixup_cardbus(struct pci_bus *bus)
6251{
6252}
6253EXPORT_SYMBOL(pci_fixup_cardbus);
6254
6255static int __init pci_setup(char *str)
6256{
6257	while (str) {
6258		char *k = strchr(str, ',');
6259		if (k)
6260			*k++ = 0;
6261		if (*str && (str = pcibios_setup(str)) && *str) {
6262			if (!strcmp(str, "nomsi")) {
6263				pci_no_msi();
6264			} else if (!strncmp(str, "noats", 5)) {
6265				pr_info("PCIe: ATS is disabled\n");
6266				pcie_ats_disabled = true;
6267			} else if (!strcmp(str, "noaer")) {
6268				pci_no_aer();
6269			} else if (!strcmp(str, "earlydump")) {
6270				pci_early_dump = true;
6271			} else if (!strncmp(str, "realloc=", 8)) {
6272				pci_realloc_get_opt(str + 8);
6273			} else if (!strncmp(str, "realloc", 7)) {
6274				pci_realloc_get_opt("on");
6275			} else if (!strcmp(str, "nodomains")) {
6276				pci_no_domains();
6277			} else if (!strncmp(str, "noari", 5)) {
6278				pcie_ari_disabled = true;
6279			} else if (!strncmp(str, "cbiosize=", 9)) {
6280				pci_cardbus_io_size = memparse(str + 9, &str);
6281			} else if (!strncmp(str, "cbmemsize=", 10)) {
6282				pci_cardbus_mem_size = memparse(str + 10, &str);
6283			} else if (!strncmp(str, "resource_alignment=", 19)) {
6284				resource_alignment_param = str + 19;
6285			} else if (!strncmp(str, "ecrc=", 5)) {
6286				pcie_ecrc_get_policy(str + 5);
6287			} else if (!strncmp(str, "hpiosize=", 9)) {
6288				pci_hotplug_io_size = memparse(str + 9, &str);
6289			} else if (!strncmp(str, "hpmemsize=", 10)) {
6290				pci_hotplug_mem_size = memparse(str + 10, &str);
6291			} else if (!strncmp(str, "hpbussize=", 10)) {
6292				pci_hotplug_bus_size =
6293					simple_strtoul(str + 10, &str, 0);
6294				if (pci_hotplug_bus_size > 0xff)
6295					pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
6296			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
6297				pcie_bus_config = PCIE_BUS_TUNE_OFF;
6298			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
6299				pcie_bus_config = PCIE_BUS_SAFE;
6300			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
6301				pcie_bus_config = PCIE_BUS_PERFORMANCE;
6302			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
6303				pcie_bus_config = PCIE_BUS_PEER2PEER;
6304			} else if (!strncmp(str, "pcie_scan_all", 13)) {
6305				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
6306			} else if (!strncmp(str, "disable_acs_redir=", 18)) {
6307				disable_acs_redir_param = str + 18;
6308			} else {
6309				pr_err("PCI: Unknown option `%s'\n", str);
6310			}
6311		}
6312		str = k;
6313	}
6314	return 0;
6315}
6316early_param("pci", pci_setup);
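
/*
 * Illustrative examples of the boot parameters parsed above (formats as
 * handled by the strncmp/memparse calls; the values are arbitrary):
 *
 *	pci=nomsi,noaer
 *	pci=hpmemsize=8M,hpiosize=4K
 *	pci=pcie_bus_safe
 *	pci=resource_alignment=12@0000:01:00.0
 */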
6317
6318/*
6319 * 'resource_alignment_param' and 'disable_acs_redir_param' are initialized
6320 * in pci_setup(), above, to point to data in the __initdata section which
6321 * will be freed after the init sequence is complete. We can't allocate memory
6322 * in pci_setup() because some architectures do not have any memory allocation
6323 * service available during an early_param() call. So we allocate memory and
6324 * copy the variable here before the init section is freed.
6325 *
6326 */
6327static int __init pci_realloc_setup_params(void)
6328{
6329	resource_alignment_param = kstrdup(resource_alignment_param,
6330					   GFP_KERNEL);
6331	disable_acs_redir_param = kstrdup(disable_acs_redir_param, GFP_KERNEL);
6332
6333	return 0;
6334}
6335pure_initcall(pci_realloc_setup_params);
v3.1
 
   1/*
   2 *	PCI Bus Services, see include/linux/pci.h for further explanation.
   3 *
   4 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
   5 *	David Mosberger-Tang
   6 *
   7 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
   8 */
   9
 
  10#include <linux/kernel.h>
  11#include <linux/delay.h>
 
  12#include <linux/init.h>
 
 
  13#include <linux/pci.h>
  14#include <linux/pm.h>
  15#include <linux/slab.h>
  16#include <linux/module.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/log2.h>
  20#include <linux/pci-aspm.h>
  21#include <linux/pm_wakeup.h>
  22#include <linux/interrupt.h>
  23#include <linux/device.h>
  24#include <linux/pm_runtime.h>
 
 
 
  25#include <asm/setup.h>
 
 
  26#include "pci.h"
  27
 
 
  28const char *pci_power_names[] = {
  29	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
  30};
  31EXPORT_SYMBOL_GPL(pci_power_names);
  32
  33int isa_dma_bridge_buggy;
  34EXPORT_SYMBOL(isa_dma_bridge_buggy);
  35
  36int pci_pci_problems;
  37EXPORT_SYMBOL(pci_pci_problems);
  38
  39unsigned int pci_pm_d3_delay;
  40
  41static void pci_pme_list_scan(struct work_struct *work);
  42
  43static LIST_HEAD(pci_pme_list);
  44static DEFINE_MUTEX(pci_pme_list_mutex);
  45static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
  46
  47struct pci_pme_device {
  48	struct list_head list;
  49	struct pci_dev *dev;
  50};
  51
  52#define PME_TIMEOUT 1000 /* How long between PME checks */
  53
  54static void pci_dev_d3_sleep(struct pci_dev *dev)
  55{
  56	unsigned int delay = dev->d3_delay;
  57
  58	if (delay < pci_pm_d3_delay)
  59		delay = pci_pm_d3_delay;
  60
  61	msleep(delay);
 
  62}
  63
  64#ifdef CONFIG_PCI_DOMAINS
  65int pci_domains_supported = 1;
  66#endif
  67
  68#define DEFAULT_CARDBUS_IO_SIZE		(256)
  69#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
  70/* pci=cbmemsize=nnM,cbiosize=nn can override this */
  71unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
  72unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
  73
  74#define DEFAULT_HOTPLUG_IO_SIZE		(256)
  75#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
  76/* pci=hpmemsize=nnM,hpiosize=nn can override this */
  77unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
  78unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
  79
  80enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
 
 
 
  81
  82/*
  83 * The default CLS is used if arch didn't set CLS explicitly and not
  84 * all pci devices agree on the same value.  Arch can override either
  85 * the dfl or actual value as it sees fit.  Don't forget this is
  86 * measured in 32-bit words, not bytes.
  87 */
  88u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
  89u8 pci_cache_line_size;
  90
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  91/**
  92 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
  93 * @bus: pointer to PCI bus structure to search
  94 *
  95 * Given a PCI bus, returns the highest PCI bus number present in the set
  96 * including the given PCI bus and its list of child PCI buses.
  97 */
  98unsigned char pci_bus_max_busnr(struct pci_bus* bus)
  99{
 100	struct list_head *tmp;
 101	unsigned char max, n;
 102
 103	max = bus->subordinate;
 104	list_for_each(tmp, &bus->children) {
 105		n = pci_bus_max_busnr(pci_bus_b(tmp));
 106		if(n > max)
 107			max = n;
 108	}
 109	return max;
 110}
 111EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
 112
 113#ifdef CONFIG_HAS_IOMEM
 114void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
 115{
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 116	/*
 117	 * Make sure the BAR is actually a memory resource, not an IO resource
 118	 */
 119	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
 120		WARN_ON(1);
 121		return NULL;
 122	}
 123	return ioremap_nocache(pci_resource_start(pdev, bar),
 124				     pci_resource_len(pdev, bar));
 125}
 126EXPORT_SYMBOL_GPL(pci_ioremap_bar);
 127#endif
 128
 129#if 0
 130/**
 131 * pci_max_busnr - returns maximum PCI bus number
 
 
 
 
 
 
 
 
 
 
 
 
 
 132 *
 133 * Returns the highest PCI bus number present in the system global list of
 134 * PCI buses.
 135 */
 136unsigned char __devinit
 137pci_max_busnr(void)
 138{
 139	struct pci_bus *bus = NULL;
 140	unsigned char max, n;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 141
 142	max = 0;
 143	while ((bus = pci_find_next_bus(bus)) != NULL) {
 144		n = pci_bus_max_busnr(bus);
 145		if(n > max)
 146			max = n;
 
 
 
 
 147	}
 148	return max;
 
 
 
 
 
 
 
 149}
 150
 151#endif  /*  0  */
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 152
 153#define PCI_FIND_CAP_TTL	48
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 154
 155static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
 156				   u8 pos, int cap, int *ttl)
 157{
 158	u8 id;
 
 
 
 159
 160	while ((*ttl)--) {
 161		pci_bus_read_config_byte(bus, devfn, pos, &pos);
 162		if (pos < 0x40)
 163			break;
 164		pos &= ~3;
 165		pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
 166					 &id);
 
 167		if (id == 0xff)
 168			break;
 169		if (id == cap)
 170			return pos;
 171		pos += PCI_CAP_LIST_NEXT;
 172	}
 173	return 0;
 174}
 175
 176static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
 177			       u8 pos, int cap)
 178{
 179	int ttl = PCI_FIND_CAP_TTL;
 180
 181	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
 182}
 183
 184int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
 185{
 186	return __pci_find_next_cap(dev->bus, dev->devfn,
 187				   pos + PCI_CAP_LIST_NEXT, cap);
 188}
 189EXPORT_SYMBOL_GPL(pci_find_next_capability);
 190
 191static int __pci_bus_find_cap_start(struct pci_bus *bus,
 192				    unsigned int devfn, u8 hdr_type)
 193{
 194	u16 status;
 195
 196	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
 197	if (!(status & PCI_STATUS_CAP_LIST))
 198		return 0;
 199
 200	switch (hdr_type) {
 201	case PCI_HEADER_TYPE_NORMAL:
 202	case PCI_HEADER_TYPE_BRIDGE:
 203		return PCI_CAPABILITY_LIST;
 204	case PCI_HEADER_TYPE_CARDBUS:
 205		return PCI_CB_CAPABILITY_LIST;
 206	default:
 207		return 0;
 208	}
 209
 210	return 0;
 211}
 212
 213/**
 214 * pci_find_capability - query for devices' capabilities 
 215 * @dev: PCI device to query
 216 * @cap: capability code
 217 *
 218 * Tell if a device supports a given PCI capability.
 219 * Returns the address of the requested capability structure within the
 220 * device's PCI configuration space or 0 in case the device does not
 221 * support it.  Possible values for @cap:
 222 *
 223 *  %PCI_CAP_ID_PM           Power Management 
 224 *  %PCI_CAP_ID_AGP          Accelerated Graphics Port 
 225 *  %PCI_CAP_ID_VPD          Vital Product Data 
 226 *  %PCI_CAP_ID_SLOTID       Slot Identification 
 227 *  %PCI_CAP_ID_MSI          Message Signalled Interrupts
 228 *  %PCI_CAP_ID_CHSWP        CompactPCI HotSwap 
 229 *  %PCI_CAP_ID_PCIX         PCI-X
 230 *  %PCI_CAP_ID_EXP          PCI Express
 231 */
 232int pci_find_capability(struct pci_dev *dev, int cap)
 233{
 234	int pos;
 235
 236	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 237	if (pos)
 238		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
 239
 240	return pos;
 241}
 
 242
 243/**
 244 * pci_bus_find_capability - query for devices' capabilities 
 245 * @bus:   the PCI bus to query
 246 * @devfn: PCI device to query
 247 * @cap:   capability code
 248 *
 249 * Like pci_find_capability() but works for pci devices that do not have a
 250 * pci_dev structure set up yet. 
 251 *
 252 * Returns the address of the requested capability structure within the
 253 * device's PCI configuration space or 0 in case the device does not
 254 * support it.
 255 */
 256int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
 257{
 258	int pos;
 259	u8 hdr_type;
 260
 261	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
 262
 263	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
 264	if (pos)
 265		pos = __pci_find_next_cap(bus, devfn, pos, cap);
 266
 267	return pos;
 268}
 
 269
 270/**
 271 * pci_find_ext_capability - Find an extended capability
 272 * @dev: PCI device to query
 
 273 * @cap: capability code
 274 *
 275 * Returns the address of the requested extended capability structure
 276 * within the device's PCI configuration space or 0 if the device does
 277 * not support it.  Possible values for @cap:
 278 *
 279 *  %PCI_EXT_CAP_ID_ERR		Advanced Error Reporting
 280 *  %PCI_EXT_CAP_ID_VC		Virtual Channel
 281 *  %PCI_EXT_CAP_ID_DSN		Device Serial Number
 282 *  %PCI_EXT_CAP_ID_PWR		Power Budgeting
 283 */
 284int pci_find_ext_capability(struct pci_dev *dev, int cap)
 285{
 286	u32 header;
 287	int ttl;
 288	int pos = PCI_CFG_SPACE_SIZE;
 289
 290	/* minimum 8 bytes per capability */
 291	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 292
 293	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
 294		return 0;
 295
 
 
 
 296	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 297		return 0;
 298
 299	/*
 300	 * If we have no capabilities, this is indicated by cap ID,
 301	 * cap version and next pointer all being 0.
 302	 */
 303	if (header == 0)
 304		return 0;
 305
 306	while (ttl-- > 0) {
 307		if (PCI_EXT_CAP_ID(header) == cap)
 308			return pos;
 309
 310		pos = PCI_EXT_CAP_NEXT(header);
 311		if (pos < PCI_CFG_SPACE_SIZE)
 312			break;
 313
 314		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
 315			break;
 316	}
 317
 318	return 0;
 319}
 320EXPORT_SYMBOL_GPL(pci_find_ext_capability);
 321
 322/**
 323 * pci_bus_find_ext_capability - find an extended capability
 324 * @bus:   the PCI bus to query
 325 * @devfn: PCI device to query
 326 * @cap:   capability code
 327 *
 328 * Like pci_find_ext_capability() but works for pci devices that do not have a
 329 * pci_dev structure set up yet.
 
 330 *
 331 * Returns the address of the requested capability structure within the
 332 * device's PCI configuration space or 0 in case the device does not
 333 * support it.
 
 334 */
 335int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
 336				int cap)
 337{
 338	u32 header;
 339	int ttl;
 340	int pos = PCI_CFG_SPACE_SIZE;
 341
 342	/* minimum 8 bytes per capability */
 343	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
 344
 345	if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 346		return 0;
 347	if (header == 0xffffffff || header == 0)
 348		return 0;
 349
 350	while (ttl-- > 0) {
 351		if (PCI_EXT_CAP_ID(header) == cap)
 352			return pos;
 353
 354		pos = PCI_EXT_CAP_NEXT(header);
 355		if (pos < PCI_CFG_SPACE_SIZE)
 356			break;
 357
 358		if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
 359			break;
 360	}
 361
 362	return 0;
 363}
 
 364
 365static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
 366{
 367	int rc, ttl = PCI_FIND_CAP_TTL;
 368	u8 cap, mask;
 369
 370	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
 371		mask = HT_3BIT_CAP_MASK;
 372	else
 373		mask = HT_5BIT_CAP_MASK;
 374
 375	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
 376				      PCI_CAP_ID_HT, &ttl);
 377	while (pos) {
 378		rc = pci_read_config_byte(dev, pos + 3, &cap);
 379		if (rc != PCIBIOS_SUCCESSFUL)
 380			return 0;
 381
 382		if ((cap & mask) == ht_cap)
 383			return pos;
 384
 385		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
 386					      pos + PCI_CAP_LIST_NEXT,
 387					      PCI_CAP_ID_HT, &ttl);
 388	}
 389
 390	return 0;
 391}
 392/**
 393 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
 394 * @dev: PCI device to query
 395 * @pos: Position from which to continue searching
 396 * @ht_cap: Hypertransport capability code
 397 *
 398 * To be used in conjunction with pci_find_ht_capability() to search for
 399 * all capabilities matching @ht_cap. @pos should always be a value returned
 400 * from pci_find_ht_capability().
 401 *
 402 * NB. To be 100% safe against broken PCI devices, the caller should take
 403 * steps to avoid an infinite loop.
 404 */
 405int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
 406{
 407	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
 408}
 409EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
 410
 411/**
 412 * pci_find_ht_capability - query a device's Hypertransport capabilities
 413 * @dev: PCI device to query
 414 * @ht_cap: Hypertransport capability code
 415 *
 416 * Tell if a device supports a given Hypertransport capability.
 417 * Returns an address within the device's PCI configuration space
 418 * or 0 in case the device does not support the request capability.
 419 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 420 * which has a Hypertransport capability matching @ht_cap.
 421 */
 422int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
 423{
 424	int pos;
 425
 426	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
 427	if (pos)
 428		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
 429
 430	return pos;
 431}
 432EXPORT_SYMBOL_GPL(pci_find_ht_capability);
 433
 434/**
 435 * pci_find_parent_resource - return resource region of parent bus of given region
 
 436 * @dev: PCI device structure contains resources to be searched
 437 * @res: child resource record for which parent is sought
 438 *
 439 *  For given resource region of given device, return the resource
 440 *  region of parent bus the given region is contained in or where
 441 *  it should be allocated from.
 442 */
 443struct resource *
 444pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
 445{
 446	const struct pci_bus *bus = dev->bus;
 
 447	int i;
 448	struct resource *best = NULL, *r;
 449
 450	pci_bus_for_each_resource(bus, r, i) {
 451		if (!r)
 452			continue;
 453		if (res->start && !(res->start >= r->start && res->end <= r->end))
 454			continue;	/* Not contained */
 455		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
 456			continue;	/* Wrong type */
 457		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
 458			return r;	/* Exact match */
 459		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
 460		if (r->flags & IORESOURCE_PREFETCH)
 461			continue;
 462		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
 463		if (!best)
 464			best = r;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 465	}
 466	return best;
 
 467}
 468
 469/**
 470 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
 471 * @dev: PCI device to have its BARs restored
 472 *
 473 * Restore the BAR values for a given device, so as to make it
 474 * accessible by its driver.
 475 */
 476static void
 477pci_restore_bars(struct pci_dev *dev)
 478{
 479	int i;
 480
 481	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
 482		pci_update_resource(dev, i);
 483}
 484
 485static struct pci_platform_pm_ops *pci_platform_pm;
 486
 487int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
 488{
 489	if (!ops->is_manageable || !ops->set_state || !ops->choose_state
 490	    || !ops->sleep_wake || !ops->can_wakeup)
 491		return -EINVAL;
 492	pci_platform_pm = ops;
 493	return 0;
 494}
 495
 496static inline bool platform_pci_power_manageable(struct pci_dev *dev)
 497{
 498	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
 499}
 500
 501static inline int platform_pci_set_power_state(struct pci_dev *dev,
 502                                                pci_power_t t)
 503{
 504	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
 505}
 506
 
 
 
 
 
 
 
 
 
 
 
 507static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
 508{
 509	return pci_platform_pm ?
 510			pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
 511}
 512
 513static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
 514{
 515	return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
 
 516}
 517
 518static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
 519{
 520	return pci_platform_pm ?
 521			pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
 522}
 523
 524static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
 525{
 526	return pci_platform_pm ?
 527			pci_platform_pm->run_wake(dev, enable) : -ENODEV;
 528}
 529
 530/**
 531 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 532 *                           given PCI device
 533 * @dev: PCI device to handle.
 534 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 535 *
 536 * RETURN VALUE:
 537 * -EINVAL if the requested state is invalid.
 538 * -EIO if device does not support PCI PM or its PM capabilities register has a
 539 * wrong version, or device doesn't support the requested state.
 540 * 0 if device already is in the requested state.
 541 * 0 if device's power state has been successfully changed.
 542 */
 543static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
 544{
 545	u16 pmcsr;
 546	bool need_restore = false;
 547
 548	/* Check if we're already there */
 549	if (dev->current_state == state)
 550		return 0;
 551
 552	if (!dev->pm_cap)
 553		return -EIO;
 554
 555	if (state < PCI_D0 || state > PCI_D3hot)
 556		return -EINVAL;
 557
 558	/* Validate current state:
 559	 * Can enter D0 from any state, but if we can only go deeper 
 
 560	 * to sleep if we're already in a low power state
 561	 */
 562	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
 563	    && dev->current_state > state) {
 564		dev_err(&dev->dev, "invalid power transition "
 565			"(from state %d to %d)\n", dev->current_state, state);
 566		return -EINVAL;
 567	}
 568
 569	/* check if this device supports the desired state */
 570	if ((state == PCI_D1 && !dev->d1_support)
 571	   || (state == PCI_D2 && !dev->d2_support))
 572		return -EIO;
 573
 574	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 575
 576	/* If we're (effectively) in D3, force entire word to 0.
 
 577	 * This doesn't affect PME_Status, disables PME_En, and
 578	 * sets PowerState to 0.
 579	 */
 580	switch (dev->current_state) {
 581	case PCI_D0:
 582	case PCI_D1:
 583	case PCI_D2:
 584		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 585		pmcsr |= state;
 586		break;
 587	case PCI_D3hot:
 588	case PCI_D3cold:
 589	case PCI_UNKNOWN: /* Boot-up */
 590		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
 591		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
 592			need_restore = true;
 593		/* Fall-through: force to D0 */
 594	default:
 595		pmcsr = 0;
 596		break;
 597	}
 598
 599	/* enter specified state */
 600	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
 601
 602	/* Mandatory power management transition delays */
 603	/* see PCI PM 1.1 5.6.1 table 18 */
 
 
 604	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
 605		pci_dev_d3_sleep(dev);
 606	else if (state == PCI_D2 || dev->current_state == PCI_D2)
 607		udelay(PCI_PM_D2_DELAY);
 608
 609	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 610	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 611	if (dev->current_state != state && printk_ratelimit())
 612		dev_info(&dev->dev, "Refused to change power state, "
 613			"currently in D%d\n", dev->current_state);
 614
 615	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
 
 616	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
 617	 * from D3hot to D0 _may_ perform an internal reset, thereby
 618	 * going to "D0 Uninitialized" rather than "D0 Initialized".
 619	 * For example, at least some versions of the 3c905B and the
 620	 * 3c556B exhibit this behaviour.
 621	 *
 622	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
 623	 * devices in a D3hot state at boot.  Consequently, we need to
 624	 * restore at least the BARs so that the device will be
 625	 * accessible to its driver.
 626	 */
 627	if (need_restore)
 628		pci_restore_bars(dev);
 629
 630	if (dev->bus->self)
 631		pcie_aspm_pm_state_change(dev->bus->self);
 632
 633	return 0;
 634}
 635
 636/**
 637 * pci_update_current_state - Read PCI power state of given device from its
 638 *                            PCI PM registers and cache it
 639 * @dev: PCI device to handle.
 640 * @state: State to cache in case the device doesn't have the PM capability
 
 
 
 
 
 
 
 641 */
 642void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
 643{
 644	if (dev->pm_cap) {
 
 
 
 645		u16 pmcsr;
 646
 647		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
 648		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
 649	} else {
 650		dev->current_state = state;
 651	}
 652}
 653
 654/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 655 * pci_platform_power_transition - Use platform to change device power state
 656 * @dev: PCI device to handle.
 657 * @state: State to put the device into.
 658 */
 659static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
 660{
 661	int error;
 662
 663	if (platform_pci_power_manageable(dev)) {
 664		error = platform_pci_set_power_state(dev, state);
 665		if (!error)
 666			pci_update_current_state(dev, state);
 667	} else {
 668		error = -ENODEV;
 669		/* Fall back to PCI_D0 if native PM is not supported */
 670		if (!dev->pm_cap)
 671			dev->current_state = PCI_D0;
 672	}
 673
 674	return error;
 675}
 676
 677/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 678 * __pci_start_power_transition - Start power transition of a PCI device
 679 * @dev: PCI device to handle.
 680 * @state: State to put the device into.
 681 */
 682static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
 683{
 684	if (state == PCI_D0)
 685		pci_platform_power_transition(dev, PCI_D0);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 686}
 687
 688/**
 689 * __pci_complete_power_transition - Complete power transition of a PCI device
 690 * @dev: PCI device to handle.
 691 * @state: State to put the device into.
 692 *
 693 * This function should not be called directly by device drivers.
 694 */
 695int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
 696{
 697	return state >= PCI_D0 ?
 698			pci_platform_power_transition(dev, state) : -EINVAL;
 
 
 
 
 
 
 
 699}
 700EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
 701
 702/**
 703 * pci_set_power_state - Set the power state of a PCI device
 704 * @dev: PCI device to handle.
 705 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 706 *
 707 * Transition a device to a new power state, using the platform firmware and/or
 708 * the device's PCI PM registers.
 709 *
 710 * RETURN VALUE:
 711 * -EINVAL if the requested state is invalid.
 712 * -EIO if device does not support PCI PM or its PM capabilities register has a
 713 * wrong version, or device doesn't support the requested state.
 
 714 * 0 if device already is in the requested state.
 
 715 * 0 if device's power state has been successfully changed.
 716 */
 717int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
 718{
 719	int error;
 720
 721	/* bound the state we're entering */
 722	if (state > PCI_D3hot)
 723		state = PCI_D3hot;
 724	else if (state < PCI_D0)
 725		state = PCI_D0;
 726	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
 
 727		/*
 728		 * If the device or the parent bridge do not support PCI PM,
 729		 * ignore the request if we're doing anything other than putting
 730		 * it into D0 (which would only happen on boot).
 
 731		 */
 732		return 0;
 733
 
 
 
 
 734	__pci_start_power_transition(dev, state);
 735
 736	/* This device is quirked not to be put into D3, so
 737	   don't put it in D3 */
 738	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
 
 
 739		return 0;
 740
 741	error = pci_raw_set_power_state(dev, state);
 
 
 
 
 
 742
 743	if (!__pci_complete_power_transition(dev, state))
 744		error = 0;
 745	/*
 746	 * When aspm_policy is "powersave" this call ensures
 747	 * that ASPM is configured.
 748	 */
 749	if (!error && dev->bus->self)
 750		pcie_aspm_powersave_config_link(dev->bus->self);
 751
 752	return error;
 753}
 
 
 
 
 
 
 
 
 
 
 
 
 754
 755/**
 756 * pci_choose_state - Choose the power state of a PCI device
 757 * @dev: PCI device to be suspended
 758 * @state: target sleep state for the whole system. This is the value
 759 *	that is passed to suspend() function.
 760 *
 761 * Returns PCI power state suitable for given device and given system
 762 * message.
 763 */
 764
 765pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
 766{
 767	pci_power_t ret;
 768
 769	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
 770		return PCI_D0;
 771
 772	ret = platform_pci_choose_state(dev);
 773	if (ret != PCI_POWER_ERROR)
 774		return ret;
 775
 776	switch (state.event) {
 777	case PM_EVENT_ON:
 778		return PCI_D0;
 779	case PM_EVENT_FREEZE:
 780	case PM_EVENT_PRETHAW:
 781		/* REVISIT both freeze and pre-thaw "should" use D0 */
 782	case PM_EVENT_SUSPEND:
 783	case PM_EVENT_HIBERNATE:
 784		return PCI_D3hot;
 785	default:
 786		dev_info(&dev->dev, "unrecognized suspend event %d\n",
 787			 state.event);
 788		BUG();
 789	}
 790	return PCI_D0;
 791}
 792
 793EXPORT_SYMBOL(pci_choose_state);
 794
 795#define PCI_EXP_SAVE_REGS	7
 796
 797#define pcie_cap_has_devctl(type, flags)	1
 798#define pcie_cap_has_lnkctl(type, flags)		\
 799		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 800		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 801		  type == PCI_EXP_TYPE_ENDPOINT ||	\
 802		  type == PCI_EXP_TYPE_LEG_END))
 803#define pcie_cap_has_sltctl(type, flags)		\
 804		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 805		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
 806		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
 807		   (flags & PCI_EXP_FLAGS_SLOT))))
 808#define pcie_cap_has_rtctl(type, flags)			\
 809		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
 810		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
 811		  type == PCI_EXP_TYPE_RC_EC))
 812#define pcie_cap_has_devctl2(type, flags)		\
 813		((flags & PCI_EXP_FLAGS_VERS) > 1)
 814#define pcie_cap_has_lnkctl2(type, flags)		\
 815		((flags & PCI_EXP_FLAGS_VERS) > 1)
 816#define pcie_cap_has_sltctl2(type, flags)		\
 817		((flags & PCI_EXP_FLAGS_VERS) > 1)
 818
 819static int pci_save_pcie_state(struct pci_dev *dev)
 820{
 821	int pos, i = 0;
 822	struct pci_cap_saved_state *save_state;
 823	u16 *cap;
 824	u16 flags;
 825
 826	pos = pci_pcie_cap(dev);
 827	if (!pos)
 828		return 0;
 829
 830	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 831	if (!save_state) {
 832		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 833		return -ENOMEM;
 834	}
 
 835	cap = (u16 *)&save_state->cap.data[0];
 836
 837	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 838
 839	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 840		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
 841	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 842		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
 843	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 844		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
 845	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 846		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
 847	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 848		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
 849	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 850		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
 851	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 852		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
 853
 854	return 0;
 855}
 856
 857static void pci_restore_pcie_state(struct pci_dev *dev)
 858{
 859	int i = 0, pos;
 860	struct pci_cap_saved_state *save_state;
 861	u16 *cap;
 862	u16 flags;
 863
 864	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
 865	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
 866	if (!save_state || pos <= 0)
 867		return;
 
 868	cap = (u16 *)&save_state->cap.data[0];
 869
 870	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
 871
 872	if (pcie_cap_has_devctl(dev->pcie_type, flags))
 873		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
 874	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
 875		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
 876	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
 877		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
 878	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
 879		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
 880	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
 881		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
 882	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
 883		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
 884	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
 885		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
 886}
 887
 888
 889static int pci_save_pcix_state(struct pci_dev *dev)
 890{
 891	int pos;
 892	struct pci_cap_saved_state *save_state;
 893
 894	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 895	if (pos <= 0)
 896		return 0;
 897
 898	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 899	if (!save_state) {
 900		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
 901		return -ENOMEM;
 902	}
 903
 904	pci_read_config_word(dev, pos + PCI_X_CMD,
 905			     (u16 *)save_state->cap.data);
 906
 907	return 0;
 908}
 909
 910static void pci_restore_pcix_state(struct pci_dev *dev)
 911{
 912	int i = 0, pos;
 913	struct pci_cap_saved_state *save_state;
 914	u16 *cap;
 915
 916	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
 917	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
 918	if (!save_state || pos <= 0)
 919		return;
 920	cap = (u16 *)&save_state->cap.data[0];
 921
 922	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
 923}
 924
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 925
 926/**
 927 * pci_save_state - save the PCI configuration space of a device before suspending
 928 * @dev: - PCI device that we're dealing with
 
 929 */
 930int
 931pci_save_state(struct pci_dev *dev)
 932{
 933	int i;
 934	/* XXX: 100% dword access ok here? */
 935	for (i = 0; i < 16; i++)
 936		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 937	dev->state_saved = true;
 938	if ((i = pci_save_pcie_state(dev)) != 0)
 
 
 939		return i;
 940	if ((i = pci_save_pcix_state(dev)) != 0)
 
 
 941		return i;
 942	return 0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 943}
 944
 945/** 
 946 * pci_restore_state - Restore the saved state of a PCI device
 947 * @dev: - PCI device that we're dealing with
 948 */
 949void pci_restore_state(struct pci_dev *dev)
 950{
 951	int i;
 952	u32 val;
 953
 954	if (!dev->state_saved)
 955		return;
 956
 957	/* PCI Express register must be restored first */
 958	pci_restore_pcie_state(dev);
 959
 960	/*
 961	 * The Base Address register should be programmed before the command
 962	 * register(s)
 963	 */
 964	for (i = 15; i >= 0; i--) {
 965		pci_read_config_dword(dev, i * 4, &val);
 966		if (val != dev->saved_config_space[i]) {
 967			dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
 968				"space at offset %#x (was %#x, writing %#x)\n",
 969				i, val, (int)dev->saved_config_space[i]);
 970			pci_write_config_dword(dev, i * 4,
 971				dev->saved_config_space[i]);
 972		}
 973	}
 974	pci_restore_pcix_state(dev);
 975	pci_restore_msi_state(dev);
 976	pci_restore_iov_state(dev);
 977
 978	dev->state_saved = false;
 979}
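/*
 * Illustrative sketch (not part of this file): a driver's legacy PM
 * hooks would typically pair pci_save_state() with pci_restore_state().
 * "foo_suspend"/"foo_resume" and the surrounding steps are hypothetical.
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, state));
 *	}
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */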
 980
 981struct pci_saved_state {
 982	u32 config_space[16];
 983	struct pci_cap_saved_data cap[0];
 984};
 985
 986/**
 987 * pci_store_saved_state - Allocate and return an opaque struct containing
 988 *			   the device saved state.
 989 * @dev: PCI device that we're dealing with
 990 *
 991 * Return NULL if no state or error.
 992 */
 993struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
 994{
 995	struct pci_saved_state *state;
 996	struct pci_cap_saved_state *tmp;
 997	struct pci_cap_saved_data *cap;
 998	struct hlist_node *pos;
 999	size_t size;
1000
1001	if (!dev->state_saved)
1002		return NULL;
1003
1004	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);
1005
1006	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
1007		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1008
1009	state = kzalloc(size, GFP_KERNEL);
1010	if (!state)
1011		return NULL;
1012
1013	memcpy(state->config_space, dev->saved_config_space,
1014	       sizeof(state->config_space));
1015
1016	cap = state->cap;
1017	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
1018		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
1019		memcpy(cap, &tmp->cap, len);
1020		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
1021	}
1022	/* Empty cap_save terminates list */
1023
1024	return state;
1025}
1026EXPORT_SYMBOL_GPL(pci_store_saved_state);
1027
1028/**
1029 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
1030 * @dev: PCI device that we're dealing with
1031 * @state: Saved state returned from pci_store_saved_state()
1032 */
1033int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
1034{
1035	struct pci_cap_saved_data *cap;
1036
1037	dev->state_saved = false;
1038
1039	if (!state)
1040		return 0;
1041
1042	memcpy(dev->saved_config_space, state->config_space,
1043	       sizeof(state->config_space));
1044
1045	cap = state->cap;
1046	while (cap->size) {
1047		struct pci_cap_saved_state *tmp;
1048
1049		tmp = pci_find_saved_cap(dev, cap->cap_nr);
1050		if (!tmp || tmp->cap.size != cap->size)
1051			return -EINVAL;
1052
1053		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
1054		cap = (struct pci_cap_saved_data *)((u8 *)cap +
1055		       sizeof(struct pci_cap_saved_data) + cap->size);
1056	}
1057
1058	dev->state_saved = true;
1059	return 0;
1060}
1061EXPORT_SYMBOL_GPL(pci_load_saved_state);
1062
1063/**
1064 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1065 *				   and free the memory allocated for it.
1066 * @dev: PCI device that we're dealing with
1067 * @state: Pointer to saved state returned from pci_store_saved_state()
1068 */
1069int pci_load_and_free_saved_state(struct pci_dev *dev,
1070				  struct pci_saved_state **state)
1071{
1072	int ret = pci_load_saved_state(dev, *state);
1073	kfree(*state);
1074	*state = NULL;
1075	return ret;
1076}
1077EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
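/*
 * Illustrative sketch (hypothetical caller, e.g. a reset or guest
 * hand-off path): capture the saved state once, then reload and free
 * it after the operation that may have clobbered config space.
 *
 *	struct pci_saved_state *state;
 *
 *	pci_save_state(pdev);
 *	state = pci_store_saved_state(pdev);
 *	... operation that disturbs the device ...
 *	pci_load_and_free_saved_state(pdev, &state);
 *	pci_restore_state(pdev);
 */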
1078
1079static int do_pci_enable_device(struct pci_dev *dev, int bars)
1080{
1081	int err;
1082
1083	err = pci_set_power_state(dev, PCI_D0);
1084	if (err < 0 && err != -EIO)
1085		return err;
1086	err = pcibios_enable_device(dev, bars);
1087	if (err < 0)
1088		return err;
1089	pci_fixup_device(pci_fixup_enable, dev);
1090
1091	return 0;
1092}
1093
1094/**
1095 * pci_reenable_device - Resume abandoned device
1096 * @dev: PCI device to be resumed
1097 *
1098 *  Note this function is a backend of pci_default_resume and is not supposed
1099 *  to be called by normal code; write a proper resume handler and use that instead.
1100 */
1101int pci_reenable_device(struct pci_dev *dev)
1102{
1103	if (pci_is_enabled(dev))
1104		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
1105	return 0;
1106}
1107
1108static int __pci_enable_device_flags(struct pci_dev *dev,
1109				     resource_size_t flags)
1110{
1111	int err;
1112	int i, bars = 0;
1113
1114	/*
1115	 * Power state could be unknown at this point, either due to a fresh
1116	 * boot or a device removal call.  So get the current power state
1117	 * so that things like MSI message writing will behave as expected
1118	 * (e.g. if the device really is in D0 at enable time).
1119	 */
1120	if (dev->pm_cap) {
1121		u16 pmcsr;
1122		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1123		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
1124	}
1125
1126	if (atomic_add_return(1, &dev->enable_cnt) > 1)
1127		return 0;		/* already enabled */
1128
1129	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1130		if (dev->resource[i].flags & flags)
1131			bars |= (1 << i);
1132
1133	err = do_pci_enable_device(dev, bars);
1134	if (err < 0)
1135		atomic_dec(&dev->enable_cnt);
1136	return err;
1137}
1138
1139/**
1140 * pci_enable_device_io - Initialize a device for use with IO space
1141 * @dev: PCI device to be initialized
1142 *
1143 *  Initialize device before it's used by a driver. Ask low-level code
1144 *  to enable I/O resources. Wake up the device if it was suspended.
1145 *  Beware, this function can fail.
1146 */
1147int pci_enable_device_io(struct pci_dev *dev)
1148{
1149	return __pci_enable_device_flags(dev, IORESOURCE_IO);
1150}
1151
1152/**
1153 * pci_enable_device_mem - Initialize a device for use with Memory space
1154 * @dev: PCI device to be initialized
1155 *
1156 *  Initialize device before it's used by a driver. Ask low-level code
1157 *  to enable Memory resources. Wake up the device if it was suspended.
1158 *  Beware, this function can fail.
1159 */
1160int pci_enable_device_mem(struct pci_dev *dev)
1161{
1162	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
1163}
1164
1165/**
1166 * pci_enable_device - Initialize device before it's used by a driver.
1167 * @dev: PCI device to be initialized
1168 *
1169 *  Initialize device before it's used by a driver. Ask low-level code
1170 *  to enable I/O and memory. Wake up the device if it was suspended.
1171 *  Beware, this function can fail.
1172 *
1173 *  Note we don't actually enable the device many times if we call
1174 *  this function repeatedly (we just increment the count).
1175 */
1176int pci_enable_device(struct pci_dev *dev)
1177{
1178	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
1179}
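/*
 * Illustrative sketch (hypothetical probe function): enable the device
 * before touching any BAR and undo the enable on the error path.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int err = pci_enable_device(pdev);
 *
 *		if (err)
 *			return err;
 *		err = pci_request_regions(pdev, "foo");
 *		if (err)
 *			pci_disable_device(pdev);
 *		return err;
 *	}
 */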
1180
1181/*
1182 * Managed PCI resources.  This manages device on/off, intx/msi/msix
1183 * on/off and BAR regions.  pci_dev itself records msi/msix status, so
1184 * there's no need to track it separately.  pci_devres is initialized
1185 * when a device is enabled using managed PCI device enable interface.
1186 */
1187struct pci_devres {
1188	unsigned int enabled:1;
1189	unsigned int pinned:1;
1190	unsigned int orig_intx:1;
1191	unsigned int restore_intx:1;
1192	u32 region_mask;
1193};
1194
1195static void pcim_release(struct device *gendev, void *res)
1196{
1197	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
1198	struct pci_devres *this = res;
1199	int i;
1200
1201	if (dev->msi_enabled)
1202		pci_disable_msi(dev);
1203	if (dev->msix_enabled)
1204		pci_disable_msix(dev);
1205
1206	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
1207		if (this->region_mask & (1 << i))
1208			pci_release_region(dev, i);
1209
1210	if (this->restore_intx)
1211		pci_intx(dev, this->orig_intx);
1212
1213	if (this->enabled && !this->pinned)
1214		pci_disable_device(dev);
1215}
1216
1217static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1218{
1219	struct pci_devres *dr, *new_dr;
1220
1221	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1222	if (dr)
1223		return dr;
1224
1225	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1226	if (!new_dr)
1227		return NULL;
1228	return devres_get(&pdev->dev, new_dr, NULL, NULL);
1229}
1230
1231static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1232{
1233	if (pci_is_managed(pdev))
1234		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1235	return NULL;
1236}
1237
1238/**
1239 * pcim_enable_device - Managed pci_enable_device()
1240 * @pdev: PCI device to be initialized
1241 *
1242 * Managed pci_enable_device().
1243 */
1244int pcim_enable_device(struct pci_dev *pdev)
1245{
1246	struct pci_devres *dr;
1247	int rc;
1248
1249	dr = get_pci_dr(pdev);
1250	if (unlikely(!dr))
1251		return -ENOMEM;
1252	if (dr->enabled)
1253		return 0;
1254
1255	rc = pci_enable_device(pdev);
1256	if (!rc) {
1257		pdev->is_managed = 1;
1258		dr->enabled = 1;
1259	}
1260	return rc;
1261}
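/*
 * Illustrative sketch (hypothetical probe): with the managed variant no
 * explicit pci_disable_device() is needed; devres undoes the enable
 * automatically when the driver detaches.
 *
 *	static int foo_probe(struct pci_dev *pdev,
 *			     const struct pci_device_id *id)
 *	{
 *		int rc = pcim_enable_device(pdev);
 *
 *		if (rc)
 *			return rc;
 *		return pcim_iomap_regions(pdev, 1 << 0, "foo");
 *	}
 */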
1262
1263/**
1264 * pcim_pin_device - Pin managed PCI device
1265 * @pdev: PCI device to pin
1266 *
1267 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
1268 * driver detach.  @pdev must have been enabled with
1269 * pcim_enable_device().
1270 */
1271void pcim_pin_device(struct pci_dev *pdev)
1272{
1273	struct pci_devres *dr;
1274
1275	dr = find_pci_dr(pdev);
1276	WARN_ON(!dr || !dr->enabled);
1277	if (dr)
1278		dr->pinned = 1;
1279}
1280
1281/**
1282 * pcibios_disable_device - disable arch specific PCI resources for device dev
1283 * @dev: the PCI device to disable
1284 *
1285 * Disables architecture specific PCI resources for the device. This
1286 * is the default implementation. Architecture implementations can
1287 * override this.
1288 */
1289void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1290
1291static void do_pci_disable_device(struct pci_dev *dev)
1292{
1293	u16 pci_command;
1294
1295	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1296	if (pci_command & PCI_COMMAND_MASTER) {
1297		pci_command &= ~PCI_COMMAND_MASTER;
1298		pci_write_config_word(dev, PCI_COMMAND, pci_command);
1299	}
1300
1301	pcibios_disable_device(dev);
1302}
1303
1304/**
1305 * pci_disable_enabled_device - Disable device without updating enable_cnt
1306 * @dev: PCI device to disable
1307 *
1308 * NOTE: This function is a backend of PCI power management routines and is
1309 * not supposed to be called by drivers.
1310 */
1311void pci_disable_enabled_device(struct pci_dev *dev)
1312{
1313	if (pci_is_enabled(dev))
1314		do_pci_disable_device(dev);
1315}
1316
1317/**
1318 * pci_disable_device - Disable PCI device after use
1319 * @dev: PCI device to be disabled
1320 *
1321 * Signal to the system that the PCI device is not in use by the system
1322 * anymore.  This only involves disabling PCI bus-mastering, if active.
1323 *
1324 * Note we don't actually disable the device until all callers of
1325 * pci_enable_device() have called pci_disable_device().
1326 */
1327void
1328pci_disable_device(struct pci_dev *dev)
1329{
1330	struct pci_devres *dr;
1331
1332	dr = find_pci_dr(dev);
1333	if (dr)
1334		dr->enabled = 0;
1335
1336	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
1337		return;
1338
1339	do_pci_disable_device(dev);
1340
1341	dev->is_busmaster = 0;
1342}
1343
1344/**
1345 * pcibios_set_pcie_reset_state - set reset state for device dev
1346 * @dev: the PCIe device to reset
1347 * @state: Reset state to enter into
1348 *
1349 *
1350 * Sets the PCIe reset state for the device. This is the default
1351 * implementation. Architecture implementations can override this.
1352 */
1353int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
1354							enum pcie_reset_state state)
1355{
1356	return -EINVAL;
1357}
1358
1359/**
1360 * pci_set_pcie_reset_state - set reset state for device dev
1361 * @dev: the PCIe device to reset
1362 * @state: Reset state to enter into
1363 *
1364 *
1365 * Sets the PCI reset state for the device.
1366 */
1367int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
1368{
1369	return pcibios_set_pcie_reset_state(dev, state);
1370}
1371
1372/**
1373 * pci_check_pme_status - Check if given device has generated PME.
1374 * @dev: Device to check.
1375 *
1376 * Check the PME status of the device and if set, clear it and clear PME enable
1377 * (if set).  Return 'true' if PME status and PME enable were both set or
1378 * 'false' otherwise.
1379 */
1380bool pci_check_pme_status(struct pci_dev *dev)
1381{
1382	int pmcsr_pos;
1383	u16 pmcsr;
1384	bool ret = false;
1385
1386	if (!dev->pm_cap)
1387		return false;
1388
1389	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1390	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1391	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1392		return false;
1393
1394	/* Clear PME status. */
1395	pmcsr |= PCI_PM_CTRL_PME_STATUS;
1396	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1397		/* Disable PME to avoid interrupt flood. */
1398		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1399		ret = true;
1400	}
1401
1402	pci_write_config_word(dev, pmcsr_pos, pmcsr);
1403
1404	return ret;
1405}
1406
1407/**
1408 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1409 * @dev: Device to handle.
1410 * @ign: Ignored.
1411 *
1412 * Check if @dev has generated PME and queue a resume request for it in that
1413 * case.
1414 */
1415static int pci_pme_wakeup(struct pci_dev *dev, void *ign)
1416{
1417	if (pci_check_pme_status(dev)) {
1418		pci_wakeup_event(dev);
1419		pm_request_resume(&dev->dev);
1420	}
1421	return 0;
1422}
1423
1424/**
1425 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1426 * @bus: Top bus of the subtree to walk.
1427 */
1428void pci_pme_wakeup_bus(struct pci_bus *bus)
1429{
1430	if (bus)
1431		pci_walk_bus(bus, pci_pme_wakeup, NULL);
1432}
1433
1434/**
1435 * pci_pme_capable - check the capability of PCI device to generate PME#
1436 * @dev: PCI device to handle.
1437 * @state: PCI state from which device will issue PME#.
1438 */
1439bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
1440{
1441	if (!dev->pm_cap)
1442		return false;
1443
1444	return !!(dev->pme_support & (1 << state));
1445}
1446
1447static void pci_pme_list_scan(struct work_struct *work)
1448{
1449	struct pci_pme_device *pme_dev;
1450
1451	mutex_lock(&pci_pme_list_mutex);
1452	if (!list_empty(&pci_pme_list)) {
1453		list_for_each_entry(pme_dev, &pci_pme_list, list)
1454			pci_pme_wakeup(pme_dev->dev, NULL);
1455		schedule_delayed_work(&pci_pme_work, msecs_to_jiffies(PME_TIMEOUT));
1456	}
1457	mutex_unlock(&pci_pme_list_mutex);
1458}
1459
1460/**
1461 * pci_external_pme - is a device an external PCI PME source?
1462 * @dev: PCI device to check
1463 *
1464 */
1465
1466static bool pci_external_pme(struct pci_dev *dev)
1467{
1468	if (pci_is_pcie(dev) || dev->bus->number == 0)
1469		return false;
1470	return true;
1471}
1472
1473/**
1474 * pci_pme_active - enable or disable PCI device's PME# function
1475 * @dev: PCI device to handle.
1476 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1477 *
1478 * The caller must verify that the device is capable of generating PME# before
1479 * calling this function with @enable equal to 'true'.
1480 */
1481void pci_pme_active(struct pci_dev *dev, bool enable)
1482{
1483	u16 pmcsr;
1484
1485	if (!dev->pm_cap)
1486		return;
1487
1488	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1489	/* Clear PME_Status by writing 1 to it and enable PME# */
1490	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1491	if (!enable)
1492		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1493
1494	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1495
1496	/* PCI (as opposed to PCIe) PME requires that the device have
1497	   its PME# line hooked up correctly. Not all hardware vendors
1498	   do this, so the PME never gets delivered and the device
1499	   remains asleep. The easiest way around this is to
1500	   periodically walk the list of suspended devices and check
1501	   whether any have their PME flag set. The assumption is that
1502	   we'll wake up often enough anyway that this won't be a huge
1503	   hit, and the power savings from the devices will still be a
1504	   win. */
1505
1506	if (pci_external_pme(dev)) {
1507		struct pci_pme_device *pme_dev;
1508		if (enable) {
1509			pme_dev = kmalloc(sizeof(struct pci_pme_device),
1510					  GFP_KERNEL);
1511			if (!pme_dev)
1512				goto out;
1513			pme_dev->dev = dev;
1514			mutex_lock(&pci_pme_list_mutex);
1515			list_add(&pme_dev->list, &pci_pme_list);
1516			if (list_is_singular(&pci_pme_list))
1517				schedule_delayed_work(&pci_pme_work,
1518						      msecs_to_jiffies(PME_TIMEOUT));
1519			mutex_unlock(&pci_pme_list_mutex);
1520		} else {
1521			mutex_lock(&pci_pme_list_mutex);
1522			list_for_each_entry(pme_dev, &pci_pme_list, list) {
1523				if (pme_dev->dev == dev) {
1524					list_del(&pme_dev->list);
1525					kfree(pme_dev);
1526					break;
1527				}
1528			}
1529			mutex_unlock(&pci_pme_list_mutex);
1530		}
1531	}
1532
1533out:
1534	dev_printk(KERN_DEBUG, &dev->dev, "PME# %s\n",
1535			enable ? "enabled" : "disabled");
1536}
1537
1538/**
1539 * __pci_enable_wake - enable PCI device as wakeup event source
1540 * @dev: PCI device affected
1541 * @state: PCI state from which device will issue wakeup events
1542 * @runtime: True if the events are to be generated at run time
1543 * @enable: True to enable event generation; false to disable
1544 *
1545 * This enables the device as a wakeup event source, or disables it.
1546 * When such events involve platform-specific hooks, those hooks are
1547 * called automatically by this routine.
1548 *
1549 * Devices with legacy power management (no standard PCI PM capabilities)
1550 * always require such platform hooks.
1551 *
1552 * RETURN VALUE:
1553 * 0 is returned on success
1554 * -EINVAL is returned if device is not supposed to wake up the system
1555 * Error code depending on the platform is returned if both the platform and
1556 * the native mechanism fail to enable the generation of wake-up events
1557 */
1558int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
1559		      bool runtime, bool enable)
1560{
1561	int ret = 0;
1562
1563	if (enable && !runtime && !device_may_wakeup(&dev->dev))
1564		return -EINVAL;
1565
1566	/* Don't do the same thing twice in a row for one device. */
1567	if (!!enable == !!dev->wakeup_prepared)
1568		return 0;
1569
1570	/*
1571	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1572	 * Anderson we should be doing PME# wake enable followed by ACPI wake
1573	 * enable.  To disable wake-up we call the platform first, for symmetry.
1574	 */
1575
1576	if (enable) {
1577		int error;
1578
1579		if (pci_pme_capable(dev, state))
1580			pci_pme_active(dev, true);
1581		else
1582			ret = 1;
1583		error = runtime ? platform_pci_run_wake(dev, true) :
1584					platform_pci_sleep_wake(dev, true);
1585		if (ret)
1586			ret = error;
1587		if (!ret)
1588			dev->wakeup_prepared = true;
1589	} else {
1590		if (runtime)
1591			platform_pci_run_wake(dev, false);
1592		else
1593			platform_pci_sleep_wake(dev, false);
1594		pci_pme_active(dev, false);
1595		dev->wakeup_prepared = false;
1596	}
1597
1598	return ret;
1599}
1600EXPORT_SYMBOL(__pci_enable_wake);
1601
1602/**
1603 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1604 * @dev: PCI device to prepare
1605 * @enable: True to enable wake-up event generation; false to disable
1606 *
1607 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1608 * and this function allows them to set that up cleanly - pci_enable_wake()
1609 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1610 * ordering constraints.
1611 *
1612 * This function only returns error code if the device is not capable of
1613 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1614 * enable wake-up power for it.
1615 */
1616int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1617{
1618	return pci_pme_capable(dev, PCI_D3cold) ?
1619			pci_enable_wake(dev, PCI_D3cold, enable) :
1620			pci_enable_wake(dev, PCI_D3hot, enable);
1621}
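/*
 * Illustrative sketch (hypothetical network driver): arm wake-up from
 * D3 in the suspend path, gated on user configuration; "priv->wol_enabled"
 * is a made-up flag.
 *
 *	pci_wake_from_d3(pdev, priv->wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */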
1622
1623/**
1624 * pci_target_state - find an appropriate low power state for a given PCI dev
1625 * @dev: PCI device
1626 *
1627 * Use underlying platform code to find a supported low power state for @dev.
1628 * If the platform can't manage @dev, return the deepest state from which it
1629 * can generate wake events, based on any available PME info.
1630 */
1631pci_power_t pci_target_state(struct pci_dev *dev)
1632{
1633	pci_power_t target_state = PCI_D3hot;
1634
1635	if (platform_pci_power_manageable(dev)) {
1636		/*
1637		 * Call the platform to choose the target state of the device
1638		 * and enable wake-up from this state if supported.
1639		 */
1640		pci_power_t state = platform_pci_choose_state(dev);
1641
1642		switch (state) {
1643		case PCI_POWER_ERROR:
1644		case PCI_UNKNOWN:
1645			break;
1646		case PCI_D1:
1647		case PCI_D2:
1648			if (pci_no_d1d2(dev))
1649				break;
1650		default:
1651			target_state = state;
1652		}
1653	} else if (!dev->pm_cap) {
1654		target_state = PCI_D0;
1655	} else if (device_may_wakeup(&dev->dev)) {
1656		/*
1657		 * Find the deepest state from which the device can generate
1658		 * wake-up events, make it the target state and enable device
1659		 * to generate PME#.
1660		 */
1661		if (dev->pme_support) {
1662			while (target_state
1663			      && !(dev->pme_support & (1 << target_state)))
1664				target_state--;
1665		}
1666	}
1667
1668	return target_state;
1669}
1670
1671/**
1672 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
1673 * @dev: Device to handle.
1674 *
1675 * Choose the power state appropriate for the device depending on whether
1676 * it can wake up the system and/or is power manageable by the platform
1677 * (PCI_D3hot is the default) and put the device into that state.
1678 */
1679int pci_prepare_to_sleep(struct pci_dev *dev)
1680{
1681	pci_power_t target_state = pci_target_state(dev);
1682	int error;
1683
1684	if (target_state == PCI_POWER_ERROR)
1685		return -EIO;
1686
1687	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1688
1689	error = pci_set_power_state(dev, target_state);
1690
1691	if (error)
1692		pci_enable_wake(dev, target_state, false);
1693
1694	return error;
1695}
1696
1697/**
1698 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
1699 * @dev: Device to handle.
1700 *
1701 * Disable device's system wake-up capability and put it into D0.
1702 */
1703int pci_back_from_sleep(struct pci_dev *dev)
1704{
1705	pci_enable_wake(dev, PCI_D0, false);
1706	return pci_set_power_state(dev, PCI_D0);
1707}
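/*
 * Illustrative sketch: pci_prepare_to_sleep() and pci_back_from_sleep()
 * form a matched pair for drivers that want the PCI core to pick the
 * low-power state; "foo_suspend"/"foo_resume" are hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		return pci_prepare_to_sleep(to_pci_dev(dev));
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		return pci_back_from_sleep(to_pci_dev(dev));
 *	}
 */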
1708
1709/**
1710 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
1711 * @dev: PCI device being suspended.
1712 *
1713 * Prepare @dev to generate wake-up events at run time and put it into a low
1714 * power state.
1715 */
1716int pci_finish_runtime_suspend(struct pci_dev *dev)
1717{
1718	pci_power_t target_state = pci_target_state(dev);
1719	int error;
1720
1721	if (target_state == PCI_POWER_ERROR)
1722		return -EIO;
1723
1724	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));
1725
1726	error = pci_set_power_state(dev, target_state);
1727
1728	if (error)
1729		__pci_enable_wake(dev, target_state, true, false);
1730
1731	return error;
1732}
1733
1734/**
1735 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1736 * @dev: Device to check.
1737 *
1738 * Return true if the device itself is capable of generating wake-up events
1739 * (through the platform or using the native PCIe PME) or if the device supports
1740 * PME and one of its upstream bridges can generate wake-up events.
1741 */
1742bool pci_dev_run_wake(struct pci_dev *dev)
1743{
1744	struct pci_bus *bus = dev->bus;
1745
1746	if (device_run_wake(&dev->dev))
1747		return true;
1748
1749	if (!dev->pme_support)
1750		return false;
1751
1752	while (bus->parent) {
1753		struct pci_dev *bridge = bus->self;
1754
1755		if (device_run_wake(&bridge->dev))
1756			return true;
1757
1758		bus = bus->parent;
1759	}
1760
1761	/* We have reached the root bus. */
1762	if (bus->bridge)
1763		return device_run_wake(bus->bridge);
1764
1765	return false;
1766}
1767EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1768
1769/**
1770 * pci_pm_init - Initialize PM functions of given PCI device
1771 * @dev: PCI device to handle.
1772 */
1773void pci_pm_init(struct pci_dev *dev)
1774{
1775	int pm;
1776	u16 pmc;
1777
1778	pm_runtime_forbid(&dev->dev);
1779	device_enable_async_suspend(&dev->dev);
1780	dev->wakeup_prepared = false;
1781
1782	dev->pm_cap = 0;
1783
1784	/* find PCI PM capability in list */
1785	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
1786	if (!pm)
1787		return;
1788	/* Check device's ability to generate PME# */
1789	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
1790
1791	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
1792		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
1793			pmc & PCI_PM_CAP_VER_MASK);
1794		return;
1795	}
1796
1797	dev->pm_cap = pm;
1798	dev->d3_delay = PCI_PM_D3_WAIT;
1799
1800	dev->d1_support = false;
1801	dev->d2_support = false;
1802	if (!pci_no_d1d2(dev)) {
1803		if (pmc & PCI_PM_CAP_D1)
1804			dev->d1_support = true;
1805		if (pmc & PCI_PM_CAP_D2)
1806			dev->d2_support = true;
1807
1808		if (dev->d1_support || dev->d2_support)
1809			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
1810				   dev->d1_support ? " D1" : "",
1811				   dev->d2_support ? " D2" : "");
1812	}
1813
1814	pmc &= PCI_PM_CAP_PME_MASK;
1815	if (pmc) {
1816		dev_printk(KERN_DEBUG, &dev->dev,
1817			 "PME# supported from%s%s%s%s%s\n",
1818			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
1819			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
1820			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
1821			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
1822			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
1823		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
1824		/*
1825		 * Make device's PM flags reflect the wake-up capability, but
1826		 * let the user space enable it to wake up the system as needed.
1827		 */
1828		device_set_wakeup_capable(&dev->dev, true);
1829		/* Disable the PME# generation functionality */
1830		pci_pme_active(dev, false);
1831	} else {
1832		dev->pme_support = 0;
1833	}
1834}
1835
1836/**
1837 * platform_pci_wakeup_init - init platform wakeup if present
1838 * @dev: PCI device
1839 *
1840 * Some devices don't have PCI PM caps but can still generate wakeup
1841 * events through platform methods (like ACPI events).  If @dev supports
1842 * platform wakeup events, set the device flag to indicate as much.  This
1843 * may be redundant if the device also supports PCI PM caps, but double
1844 * initialization should be safe in that case.
1845 */
1846void platform_pci_wakeup_init(struct pci_dev *dev)
1847{
1848	if (!platform_pci_can_wakeup(dev))
1849		return;
1850
1851	device_set_wakeup_capable(&dev->dev, true);
1852	platform_pci_sleep_wake(dev, false);
1853}
1854
1855/**
1856 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
1857 * @dev: the PCI device
1858 * @cap: the capability to allocate the buffer for
1859 * @size: requested size of the buffer
1860 */
1861static int pci_add_cap_save_buffer(
1862	struct pci_dev *dev, char cap, unsigned int size)
1863{
1864	int pos;
1865	struct pci_cap_saved_state *save_state;
1866
1867	pos = pci_find_capability(dev, cap);
1868	if (pos <= 0)
1869		return 0;
1870
1871	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
1872	if (!save_state)
1873		return -ENOMEM;
1874
1875	save_state->cap.cap_nr = cap;
1876	save_state->cap.size = size;
1877	pci_add_saved_cap(dev, save_state);
1878
1879	return 0;
1880}
1881
1882/**
1883 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
1884 * @dev: the PCI device
1885 */
1886void pci_allocate_cap_save_buffers(struct pci_dev *dev)
1887{
1888	int error;
1889
1890	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
1891					PCI_EXP_SAVE_REGS * sizeof(u16));
1892	if (error)
1893		dev_err(&dev->dev,
1894			"unable to preallocate PCI Express save buffer\n");
1895
1896	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
1897	if (error)
1898		dev_err(&dev->dev,
1899			"unable to preallocate PCI-X save buffer\n");
1900}
1901
1902/**
1903 * pci_enable_ari - enable ARI forwarding if hardware supports it
1904 * @dev: the PCI device
1905 */
1906void pci_enable_ari(struct pci_dev *dev)
1907{
1908	int pos;
1909	u32 cap;
1910	u16 flags, ctrl;
1911	struct pci_dev *bridge;
1912
1913	if (!pci_is_pcie(dev) || dev->devfn)
1914		return;
1915
1916	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1917	if (!pos)
1918		return;
1919
1920	bridge = dev->bus->self;
1921	if (!bridge || !pci_is_pcie(bridge))
1922		return;
1923
1924	pos = pci_pcie_cap(bridge);
1925	if (!pos)
1926		return;
1927
1928	/* ARI is a PCIe v2 feature */
1929	pci_read_config_word(bridge, pos + PCI_EXP_FLAGS, &flags);
1930	if ((flags & PCI_EXP_FLAGS_VERS) < 2)
1931		return;
1932
1933	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
1934	if (!(cap & PCI_EXP_DEVCAP2_ARI))
1935		return;
1936
1937	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
1938	ctrl |= PCI_EXP_DEVCTL2_ARI;
1939	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
1940
1941	bridge->ari_enabled = 1;
1942}
1943
1944/**
1945 * pci_enable_ido - enable ID-based ordering on a device
1946 * @dev: the PCI device
1947 * @type: which types of IDO to enable
1948 *
1949 * Enable ID-based ordering on @dev.  @type can contain the bits
1950 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
1951 * which types of transactions are allowed to be re-ordered.
1952 */
1953void pci_enable_ido(struct pci_dev *dev, unsigned long type)
1954{
1955	int pos;
1956	u16 ctrl;
1957
1958	pos = pci_pcie_cap(dev);
1959	if (!pos)
1960		return;
1961
1962	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1963	if (type & PCI_EXP_IDO_REQUEST)
1964		ctrl |= PCI_EXP_IDO_REQ_EN;
1965	if (type & PCI_EXP_IDO_COMPLETION)
1966		ctrl |= PCI_EXP_IDO_CMP_EN;
1967	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1968}
1969EXPORT_SYMBOL(pci_enable_ido);
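/*
 * Illustrative sketch (hypothetical caller): a device whose DMA streams
 * are independent of one another may opt in to both IDO types at once.
 *
 *	pci_enable_ido(pdev, PCI_EXP_IDO_REQUEST | PCI_EXP_IDO_COMPLETION);
 */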
1970
1971/**
1972 * pci_disable_ido - disable ID-based ordering on a device
1973 * @dev: the PCI device
1974 * @type: which types of IDO to disable
1975 */
1976void pci_disable_ido(struct pci_dev *dev, unsigned long type)
1977{
1978	int pos;
1979	u16 ctrl;
1980
1981	if (!pci_is_pcie(dev))
1982		return;
1983
1984	pos = pci_pcie_cap(dev);
1985	if (!pos)
1986		return;
1987
1988	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
1989	if (type & PCI_EXP_IDO_REQUEST)
1990		ctrl &= ~PCI_EXP_IDO_REQ_EN;
1991	if (type & PCI_EXP_IDO_COMPLETION)
1992		ctrl &= ~PCI_EXP_IDO_CMP_EN;
1993	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
1994}
1995EXPORT_SYMBOL(pci_disable_ido);
1996
1997/**
1998 * pci_enable_obff - enable optimized buffer flush/fill
1999 * @dev: PCI device
2000 * @type: type of signaling to use
2001 *
2002 * Try to enable @type OBFF signaling on @dev.  It will try using WAKE#
2003 * signaling if possible, falling back to message signaling only if
2004 * WAKE# isn't supported.  @type should indicate whether the PCIe link
2005 * be brought out of L0s or L1 to send the message.  It should be either
2006 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
2007 *
2008 * If your device can benefit from receiving all messages, even at the
2009 * power cost of bringing the link back up from a low power state, use
2010 * %PCI_EXP_OBFF_SIGNAL_ALWAYS.  Otherwise, use %PCI_EXP_OBFF_SIGNAL_L0 (the
2011 * preferred type).
2012 *
2013 * RETURNS:
2014 * Zero on success, appropriate error number on failure.
2015 */
2016int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2017{
2018	int pos;
2019	u32 cap;
2020	u16 ctrl;
2021	int ret;
2022
2023	if (!pci_is_pcie(dev))
2024		return -ENOTSUPP;
2025
2026	pos = pci_pcie_cap(dev);
2027	if (!pos)
2028		return -ENOTSUPP;
2029
2030	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2031	if (!(cap & PCI_EXP_OBFF_MASK))
2032		return -ENOTSUPP; /* no OBFF support at all */
2033
2034	/* Make sure the topology supports OBFF as well */
2035	if (dev->bus && dev->bus->self) {
2036		ret = pci_enable_obff(dev->bus->self, type);
2037		if (ret)
2038			return ret;
2039	}
2040
2041	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2042	if (cap & PCI_EXP_OBFF_WAKE)
2043		ctrl |= PCI_EXP_OBFF_WAKE_EN;
2044	else {
2045		switch (type) {
2046		case PCI_EXP_OBFF_SIGNAL_L0:
2047			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2048				ctrl |= PCI_EXP_OBFF_MSGA_EN;
2049			break;
2050		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2051			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2052			ctrl |= PCI_EXP_OBFF_MSGB_EN;
2053			break;
2054		default:
2055			WARN(1, "bad OBFF signal type\n");
2056			return -ENOTSUPP;
2057		}
2058	}
2059	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2060
2061	return 0;
2062}
2063EXPORT_SYMBOL(pci_enable_obff);
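/*
 * Illustrative sketch (hypothetical caller): most devices would prefer
 * the L0 variant so the link is not brought up from deep power states
 * just to deliver OBFF hints.
 *
 *	if (pci_enable_obff(pdev, PCI_EXP_OBFF_SIGNAL_L0))
 *		dev_info(&pdev->dev, "OBFF not available\n");
 */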
2064
2065/**
2066 * pci_disable_obff - disable optimized buffer flush/fill
2067 * @dev: PCI device
2068 *
2069 * Disable OBFF on @dev.
2070 */
2071void pci_disable_obff(struct pci_dev *dev)
2072{
2073	int pos;
2074	u16 ctrl;
2075
2076	if (!pci_is_pcie(dev))
2077		return;
2078
2079	pos = pci_pcie_cap(dev);
2080	if (!pos)
2081		return;
2082
2083	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2084	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2085	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2086}
2087EXPORT_SYMBOL(pci_disable_obff);
2088
2089/**
2090 * pci_ltr_supported - check whether a device supports LTR
2091 * @dev: PCI device
2092 *
2093 * RETURNS:
2094 * True if @dev supports latency tolerance reporting, false otherwise.
2095 */
2096bool pci_ltr_supported(struct pci_dev *dev)
2097{
2098	int pos;
2099	u32 cap;
2100
2101	if (!pci_is_pcie(dev))
2102		return false;
2103
2104	pos = pci_pcie_cap(dev);
2105	if (!pos)
2106		return false;
2107
2108	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2109
2110	return cap & PCI_EXP_DEVCAP2_LTR;
2111}
2112EXPORT_SYMBOL(pci_ltr_supported);
2113
2114/**
2115 * pci_enable_ltr - enable latency tolerance reporting
2116 * @dev: PCI device
2117 *
2118 * Enable LTR on @dev if possible, which means enabling it first on
2119 * upstream ports.
2120 *
2121 * RETURNS:
2122 * Zero on success, errno on failure.
2123 */
2124int pci_enable_ltr(struct pci_dev *dev)
2125{
2126	int pos;
2127	u16 ctrl;
2128	int ret;
2129
2130	if (!pci_ltr_supported(dev))
2131		return -ENOTSUPP;
2132
2133	pos = pci_pcie_cap(dev);
2134	if (!pos)
2135		return -ENOTSUPP;
2136
2137	/* Only primary function can enable/disable LTR */
2138	if (PCI_FUNC(dev->devfn) != 0)
2139		return -EINVAL;
2140
2141	/* Enable upstream ports first */
2142	if (dev->bus && dev->bus->self) {
2143		ret = pci_enable_ltr(dev->bus->self);
2144		if (ret)
2145			return ret;
2146	}
2147
2148	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2149	ctrl |= PCI_EXP_LTR_EN;
2150	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2151
2152	return 0;
2153}
2154EXPORT_SYMBOL(pci_enable_ltr);
2155
2156/**
2157 * pci_disable_ltr - disable latency tolerance reporting
2158 * @dev: PCI device
2159 */
2160void pci_disable_ltr(struct pci_dev *dev)
2161{
2162	int pos;
2163	u16 ctrl;
2164
2165	if (!pci_ltr_supported(dev))
2166		return;
2167
2168	pos = pci_pcie_cap(dev);
2169	if (!pos)
2170		return;
2171
2172	/* Only primary function can enable/disable LTR */
2173	if (PCI_FUNC(dev->devfn) != 0)
2174		return;
2175
2176	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2177	ctrl &= ~PCI_EXP_LTR_EN;
2178	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2179}
2180EXPORT_SYMBOL(pci_disable_ltr);
2181
2182static int __pci_ltr_scale(int *val)
2183{
2184	int scale = 0;
2185
2186	while (*val > 1023) {
2187		*val = (*val + 31) / 32;
2188		scale++;
2189	}
2190	return scale;
2191}
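/*
 * Worked example: for a requested latency of 5000 ns the loop above
 * yields *val = (5000 + 31) / 32 = 157 with scale = 1, i.e. the value
 * is expressed in 32 ns units (157 * 32 = 5024 ns, rounded up).
 */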
2192
2193/**
2194 * pci_set_ltr - set LTR latency values
2195 * @dev: PCI device
2196 * @snoop_lat_ns: snoop latency in nanoseconds
2197 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2198 *
2199 * Figure out the scale and set the LTR values accordingly.
2200 */
2201int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2202{
2203	int pos, ret, snoop_scale, nosnoop_scale;
2204	u16 val;
2205
2206	if (!pci_ltr_supported(dev))
2207		return -ENOTSUPP;
2208
2209	snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2210	nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2211
2212	if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2213	    nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2214		return -EINVAL;
2215
2216	if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2217	    (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2218		return -EINVAL;
2219
2220	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2221	if (!pos)
2222		return -ENOTSUPP;
2223
2224	val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2225	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2226	if (ret)	/* pci_write_config_word() returns 0 on success */
2227		return -EIO;
2228
2229	val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2230	ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2231	if (ret)
2232		return -EIO;
2233
2234	return 0;
2235}
2236EXPORT_SYMBOL(pci_set_ltr);
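/*
 * Illustrative sketch (hypothetical values): a device that tolerates
 * about 5 us of snoop latency and 10 us of no-snoop latency would
 * report that as follows; the scale encoding is derived internally by
 * __pci_ltr_scale().
 *
 *	if (pci_set_ltr(pdev, 5000, 10000))
 *		dev_warn(&pdev->dev, "failed to set LTR values\n");
 */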
2237
2238static int pci_acs_enable;
2239
2240/**
2241 * pci_request_acs - ask for ACS to be enabled if supported
2242 */
2243void pci_request_acs(void)
2244{
2245	pci_acs_enable = 1;
2246}
2247
2248/**
2249 * pci_enable_acs - enable ACS if hardware supports it
2250 * @dev: the PCI device
2251 */
2252void pci_enable_acs(struct pci_dev *dev)
2253{
2254	int pos;
2255	u16 cap;
2256	u16 ctrl;
2257
2258	if (!pci_acs_enable)
2259		return;
2260
2261	if (!pci_is_pcie(dev))
2262		return;
2263
2264	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2265	if (!pos)
2266		return;
2267
2268	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2269	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2270
2271	/* Source Validation */
2272	ctrl |= (cap & PCI_ACS_SV);
2273
2274	/* P2P Request Redirect */
2275	ctrl |= (cap & PCI_ACS_RR);
2276
2277	/* P2P Completion Redirect */
2278	ctrl |= (cap & PCI_ACS_CR);
2279
2280	/* Upstream Forwarding */
2281	ctrl |= (cap & PCI_ACS_UF);
2282
2283	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2284}
2285
2286/**
2287 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
2288 * @dev: the PCI device
2289 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2290 *
2291 * Perform INTx swizzling for a device behind one level of bridge.  This is
2292 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
2293 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
2294 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2295 * the PCI Express Base Specification, Revision 2.1).
2296 */
2297u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
2298{
2299	int slot;
2300
2301	if (pci_ari_enabled(dev->bus))
2302		slot = 0;
2303	else
2304		slot = PCI_SLOT(dev->devfn);
2305
2306	return (((pin - 1) + slot) % 4) + 1;
2307}
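/*
 * Worked example: INTB (pin 2) on a device in slot 1 swizzles to
 * (((2 - 1) + 1) % 4) + 1 = 3, i.e. INTC at the upstream bridge.
 */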
2308
2309int
2310pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2311{
2312	u8 pin;
2313
2314	pin = dev->pin;
2315	if (!pin)
2316		return -1;
2317
2318	while (!pci_is_root_bus(dev->bus)) {
2319		pin = pci_swizzle_interrupt_pin(dev, pin);
2320		dev = dev->bus->self;
2321	}
2322	*bridge = dev;
2323	return pin;
2324}
2325
2326/**
2327 * pci_common_swizzle - swizzle INTx all the way to root bridge
2328 * @dev: the PCI device
2329 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
2330 *
2331 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
2332 * bridges all the way up to a PCI root bus.
2333 */
2334u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
2335{
2336	u8 pin = *pinp;
2337
2338	while (!pci_is_root_bus(dev->bus)) {
2339		pin = pci_swizzle_interrupt_pin(dev, pin);
2340		dev = dev->bus->self;
2341	}
2342	*pinp = pin;
2343	return PCI_SLOT(dev->devfn);
2344}
2345
2346/**
2347 *	pci_release_region - Release a PCI BAR
2348 *	@pdev: PCI device whose resources were previously reserved by pci_request_region
2349 *	@bar: BAR to release
2350 *
2351 *	Releases the PCI I/O and memory resources previously reserved by a
2352 *	successful call to pci_request_region.  Call this function only
2353 *	after all use of the PCI regions has ceased.
2354 */
2355void pci_release_region(struct pci_dev *pdev, int bar)
2356{
2357	struct pci_devres *dr;
2358
2359	if (pci_resource_len(pdev, bar) == 0)
2360		return;
2361	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2362		release_region(pci_resource_start(pdev, bar),
2363				pci_resource_len(pdev, bar));
2364	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2365		release_mem_region(pci_resource_start(pdev, bar),
2366				pci_resource_len(pdev, bar));
2367
2368	dr = find_pci_dr(pdev);
2369	if (dr)
2370		dr->region_mask &= ~(1 << bar);
2371}
2372
2373/**
2374 *	__pci_request_region - Reserve PCI I/O and memory resource
2375 *	@pdev: PCI device whose resources are to be reserved
2376 *	@bar: BAR to be reserved
2377 *	@res_name: Name to be associated with resource.
2378 *	@exclusive: whether the region access is exclusive or not
2379 *
2380 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2381 *	being reserved by owner @res_name.  Do not access any
2382 *	address inside the PCI regions unless this call returns
2383 *	successfully.
2384 *
2385 *	If @exclusive is set, then the region is marked so that userspace
2386 *	is explicitly not allowed to map the resource via /dev/mem or
2387 * 	sysfs MMIO access.
2388 *
2389 *	Returns 0 on success, or %EBUSY on error.  A warning
2390 *	message is also printed on failure.
2391 */
2392static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
2393									int exclusive)
2394{
2395	struct pci_devres *dr;
2396
2397	if (pci_resource_len(pdev, bar) == 0)
2398		return 0;
2399
2400	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
2401		if (!request_region(pci_resource_start(pdev, bar),
2402			    pci_resource_len(pdev, bar), res_name))
2403			goto err_out;
2404	}
2405	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
2406		if (!__request_mem_region(pci_resource_start(pdev, bar),
2407					pci_resource_len(pdev, bar), res_name,
2408					exclusive))
2409			goto err_out;
2410	}
2411
2412	dr = find_pci_dr(pdev);
2413	if (dr)
2414		dr->region_mask |= 1 << bar;
2415
2416	return 0;
2417
2418err_out:
2419	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
2420		 &pdev->resource[bar]);
2421	return -EBUSY;
2422}
2423
2424/**
2425 *	pci_request_region - Reserve PCI I/O and memory resource
2426 *	@pdev: PCI device whose resources are to be reserved
2427 *	@bar: BAR to be reserved
2428 *	@res_name: Name to be associated with resource
2429 *
2430 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2431 *	being reserved by owner @res_name.  Do not access any
2432 *	address inside the PCI regions unless this call returns
2433 *	successfully.
2434 *
2435 *	Returns 0 on success, or %EBUSY on error.  A warning
2436 *	message is also printed on failure.
2437 */
2438int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
2439{
2440	return __pci_request_region(pdev, bar, res_name, 0);
2441}
2442
2443/**
2444 *	pci_request_region_exclusive - Reserve PCI I/O and memory resource
2445 *	@pdev: PCI device whose resources are to be reserved
2446 *	@bar: BAR to be reserved
2447 *	@res_name: Name to be associated with resource.
2448 *
2449 *	Mark the PCI region associated with PCI device @pdev BAR @bar as
2450 *	being reserved by owner @res_name.  Do not access any
2451 *	address inside the PCI regions unless this call returns
2452 *	successfully.
2453 *
2454 *	Returns 0 on success, or %EBUSY on error.  A warning
2455 *	message is also printed on failure.
2456 *
2457 *	The key difference that _exclusive makes is that userspace is
2458 *	explicitly not allowed to map the resource via /dev/mem or
2459 * 	sysfs.
2460 */
2461int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
2462{
2463	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
2464}
2465/**
2466 * pci_release_selected_regions - Release selected PCI I/O and memory resources
2467 * @pdev: PCI device whose resources were previously reserved
2468 * @bars: Bitmask of BARs to be released
2469 *
2470 * Release selected PCI I/O and memory resources previously reserved.
2471 * Call this function only after all use of the PCI regions has ceased.
2472 */
2473void pci_release_selected_regions(struct pci_dev *pdev, int bars)
2474{
2475	int i;
2476
2477	for (i = 0; i < 6; i++)
2478		if (bars & (1 << i))
2479			pci_release_region(pdev, i);
2480}
2481
2482int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2483				 const char *res_name, int excl)
2484{
2485	int i;
2486
2487	for (i = 0; i < 6; i++)
2488		if (bars & (1 << i))
2489			if (__pci_request_region(pdev, i, res_name, excl))
2490				goto err_out;
2491	return 0;
2492
2493err_out:
2494	while (--i >= 0)
2495		if (bars & (1 << i))
2496			pci_release_region(pdev, i);
2497
2498	return -EBUSY;
2499}
2500
2501
2502/**
2503 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
2504 * @pdev: PCI device whose resources are to be reserved
2505 * @bars: Bitmask of BARs to be requested
2506 * @res_name: Name to be associated with resource
2507 */
2508int pci_request_selected_regions(struct pci_dev *pdev, int bars,
2509				 const char *res_name)
2510{
2511	return __pci_request_selected_regions(pdev, bars, res_name, 0);
2512}
2513
2514int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
2515				 int bars, const char *res_name)
2516{
2517	return __pci_request_selected_regions(pdev, bars, res_name,
2518			IORESOURCE_EXCLUSIVE);
2519}
2520
2521/**
2522 *	pci_release_regions - Release reserved PCI I/O and memory resources
2523 *	@pdev: PCI device whose resources were previously reserved by pci_request_regions
2524 *
2525 *	Releases all PCI I/O and memory resources previously reserved by a
2526 *	successful call to pci_request_regions.  Call this function only
2527 *	after all use of the PCI regions has ceased.
2528 */
2529
2530void pci_release_regions(struct pci_dev *pdev)
2531{
2532	pci_release_selected_regions(pdev, (1 << 6) - 1);
2533}
2534
2535/**
2536 *	pci_request_regions - Reserve PCI I/O and memory resources
2537 *	@pdev: PCI device whose resources are to be reserved
2538 *	@res_name: Name to be associated with resource.
2539 *
2540 *	Mark all PCI regions associated with PCI device @pdev as
2541 *	being reserved by owner @res_name.  Do not access any
2542 *	address inside the PCI regions unless this call returns
2543 *	successfully.
2544 *
2545 *	Returns 0 on success, or %EBUSY on error.  A warning
2546 *	message is also printed on failure.
2547 */
2548int pci_request_regions(struct pci_dev *pdev, const char *res_name)
2549{
2550	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
2551}
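/*
 * Illustrative sketch (hypothetical probe fragment): request all
 * regions before mapping a BAR and release them on the error path;
 * "foo" and "regs" are made-up names.
 *
 *	err = pci_request_regions(pdev, "foo");
 *	if (err)
 *		return err;
 *	regs = pci_ioremap_bar(pdev, 0);
 *	if (!regs) {
 *		pci_release_regions(pdev);
 *		return -ENOMEM;
 *	}
 */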
2552
2553/**
2554 *	pci_request_regions_exclusive - Reserve PCI I/O and memory resources
2555 *	@pdev: PCI device whose resources are to be reserved
2556 *	@res_name: Name to be associated with resource.
2557 *
2558 *	Mark all PCI regions associated with PCI device @pdev as
2559 *	being reserved by owner @res_name.  Do not access any
2560 *	address inside the PCI regions unless this call returns
2561 *	successfully.
2562 *
2563 *	pci_request_regions_exclusive() will mark the region so that
2564 * 	/dev/mem and the sysfs MMIO access will not be allowed.
2565 *
2566 *	Returns 0 on success, or %EBUSY on error.  A warning
2567 *	message is also printed on failure.
2568 */
2569int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
2570{
2571	return pci_request_selected_regions_exclusive(pdev,
2572					((1 << 6) - 1), res_name);
2573}
2574
2575static void __pci_set_master(struct pci_dev *dev, bool enable)
2576{
2577	u16 old_cmd, cmd;
2578
2579	pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2580	if (enable)
2581		cmd = old_cmd | PCI_COMMAND_MASTER;
2582	else
2583		cmd = old_cmd & ~PCI_COMMAND_MASTER;
2584	if (cmd != old_cmd) {
2585		dev_dbg(&dev->dev, "%s bus mastering\n",
2586			enable ? "enabling" : "disabling");
2587		pci_write_config_word(dev, PCI_COMMAND, cmd);
2588	}
2589	dev->is_busmaster = enable;
2590}
2591
2592/**
2593 * pci_set_master - enables bus-mastering for device dev
2594 * @dev: the PCI device to enable
2595 *
2596 * Enables bus-mastering on the device and calls pcibios_set_master()
2597 * to do the needed arch specific settings.
2598 */
2599void pci_set_master(struct pci_dev *dev)
2600{
2601	__pci_set_master(dev, true);
2602	pcibios_set_master(dev);
2603}
2604
2605/**
2606 * pci_clear_master - disables bus-mastering for device dev
2607 * @dev: the PCI device to disable
2608 */
2609void pci_clear_master(struct pci_dev *dev)
2610{
2611	__pci_set_master(dev, false);
2612}
2613
2614/**
2615 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
2616 * @dev: the PCI device for which MWI is to be enabled
2617 *
2618 * Helper function for pci_set_mwi.
2619 * Originally copied from drivers/net/acenic.c.
2620 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
2621 *
2622 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2623 */
2624int pci_set_cacheline_size(struct pci_dev *dev)
2625{
2626	u8 cacheline_size;
2627
2628	if (!pci_cache_line_size)
2629		return -EINVAL;
2630
2631	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
2632	   equal to or a multiple of the right value. */
2633	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2634	if (cacheline_size >= pci_cache_line_size &&
2635	    (cacheline_size % pci_cache_line_size) == 0)
2636		return 0;
2637
2638	/* Write the correct value. */
2639	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
2640	/* Read it back. */
2641	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
2642	if (cacheline_size == pci_cache_line_size)
2643		return 0;
2644
2645	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
2646		   "supported\n", pci_cache_line_size << 2);
2647
2648	return -EINVAL;
2649}
2650EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2651
2652#ifdef PCI_DISABLE_MWI
2653int pci_set_mwi(struct pci_dev *dev)
2654{
2655	return 0;
2656}
2657
2658int pci_try_set_mwi(struct pci_dev *dev)
2659{
2660	return 0;
2661}
2662
2663void pci_clear_mwi(struct pci_dev *dev)
2664{
2665}
2666
2667#else
2668
2669/**
2670 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2671 * @dev: the PCI device for which MWI is enabled
2672 *
2673 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2674 *
2675 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2676 */
2677int
2678pci_set_mwi(struct pci_dev *dev)
2679{
2680	int rc;
2681	u16 cmd;
2682
2683	rc = pci_set_cacheline_size(dev);
2684	if (rc)
2685		return rc;
2686
2687	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2688	if (!(cmd & PCI_COMMAND_INVALIDATE)) {
2689		dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
2690		cmd |= PCI_COMMAND_INVALIDATE;
2691		pci_write_config_word(dev, PCI_COMMAND, cmd);
2692	}
2693
2694	return 0;
2695}
2696
2697/**
2698 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
2699 * @dev: the PCI device for which MWI is enabled
2700 *
2701 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
2702 * Callers are not required to check the return value.
2703 *
2704 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2705 */
2706int pci_try_set_mwi(struct pci_dev *dev)
2707{
2708	int rc = pci_set_mwi(dev);
2709	return rc;
2710}
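/*
 * Illustrative sketch: because MWI is purely an optimization, callers
 * commonly ignore the return value in a probe path:
 *
 *	pci_try_set_mwi(pdev);
 *
 * Failure is harmless; the device simply runs without MWI.
 */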
2711
2712/**
2713 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2714 * @dev: the PCI device to disable
2715 *
2716 * Disables PCI Memory-Write-Invalidate transaction on the device
2717 */
2718void
2719pci_clear_mwi(struct pci_dev *dev)
2720{
2721	u16 cmd;
2722
2723	pci_read_config_word(dev, PCI_COMMAND, &cmd);
2724	if (cmd & PCI_COMMAND_INVALIDATE) {
2725		cmd &= ~PCI_COMMAND_INVALIDATE;
2726		pci_write_config_word(dev, PCI_COMMAND, cmd);
2727	}
2728}
2729#endif /* ! PCI_DISABLE_MWI */
2730
2731/**
2732 * pci_intx - enables/disables PCI INTx for device dev
2733 * @pdev: the PCI device to operate on
2734 * @enable: boolean: whether to enable or disable PCI INTx
2735 *
2736 * Enables/disables PCI INTx for device dev
2737 */
2738void
2739pci_intx(struct pci_dev *pdev, int enable)
2740{
2741	u16 pci_command, new;
2742
2743	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2744
2745	if (enable) {
2746		new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2747	} else {
2748		new = pci_command | PCI_COMMAND_INTX_DISABLE;
2749	}
2750
2751	if (new != pci_command) {
2752		struct pci_devres *dr;
2753
2754		pci_write_config_word(pdev, PCI_COMMAND, new);
2755
2756		dr = find_pci_dr(pdev);
2757		if (dr && !dr->restore_intx) {
2758			dr->restore_intx = 1;
2759			dr->orig_intx = !enable;
2760		}
2761	}
2762}
2763
2764/**
2765 * pci_msi_off - disables any MSI or MSI-X capabilities
2766 * @dev: the PCI device to operate on
2767 *
2768 * If you want to use MSI, see pci_enable_msi() and friends.
2769 * This is a lower-level primitive that allows us to disable
2770 * MSI operation at the device level.
2771 */
2772void pci_msi_off(struct pci_dev *dev)
2773{
2774	int pos;
2775	u16 control;
2776
2777	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
2778	if (pos) {
2779		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
2780		control &= ~PCI_MSI_FLAGS_ENABLE;
2781		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
2782	}
2783	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
2784	if (pos) {
2785		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
2786		control &= ~PCI_MSIX_FLAGS_ENABLE;
2787		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
2788	}
2789}
2790EXPORT_SYMBOL_GPL(pci_msi_off);
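/*
 * Usage sketch (illustrative): pci_msi_off() is handy when taking over
 * a device that firmware or a previous kernel (e.g. after kexec) may
 * have left with MSI enabled, before falling back to legacy INTx.  The
 * mydev_isr() and priv names below are hypothetical:
 *
 *	pci_msi_off(pdev);
 *	rc = request_irq(pdev->irq, mydev_isr, IRQF_SHARED, "mydev", priv);
 */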
2791
2792int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
2793{
2794	return dma_set_max_seg_size(&dev->dev, size);
2795}
2796EXPORT_SYMBOL(pci_set_dma_max_seg_size);
2797
2798int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
2799{
2800	return dma_set_seg_boundary(&dev->dev, mask);
2801}
2802EXPORT_SYMBOL(pci_set_dma_seg_boundary);
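/*
 * Usage sketch (illustrative): a driver whose DMA engine handles at
 * most 64 KiB per scatterlist segment and cannot cross a 4 GiB
 * boundary would configure both limits during probe:
 *
 *	rc = pci_set_dma_max_seg_size(pdev, 65536);
 *	if (!rc)
 *		rc = pci_set_dma_seg_boundary(pdev, 0xffffffff);
 */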
2803
2804static int pcie_flr(struct pci_dev *dev, int probe)
2805{
2806	int i;
2807	int pos;
2808	u32 cap;
2809	u16 status, control;
2810
2811	pos = pci_pcie_cap(dev);
2812	if (!pos)
2813		return -ENOTTY;
2814
2815	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
2816	if (!(cap & PCI_EXP_DEVCAP_FLR))
2817		return -ENOTTY;
2818
2819	if (probe)
2820		return 0;
2821
2822	/* Wait for the Transaction Pending bit to clear */
2823	for (i = 0; i < 4; i++) {
2824		if (i)
2825			msleep((1 << (i - 1)) * 100);
2826
2827		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
2828		if (!(status & PCI_EXP_DEVSTA_TRPND))
2829			goto clear;
2830	}
2831
2832	dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
2834
2835clear:
2836	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
2837	control |= PCI_EXP_DEVCTL_BCR_FLR;
2838	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);
2839
2840	msleep(100);
2841
2842	return 0;
2843}
2844
2845static int pci_af_flr(struct pci_dev *dev, int probe)
2846{
2847	int i;
2848	int pos;
2849	u8 cap;
2850	u8 status;
2851
2852	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
2853	if (!pos)
2854		return -ENOTTY;
2855
2856	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
2857	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
2858		return -ENOTTY;
2859
2860	if (probe)
2861		return 0;
2862
2863	/* Wait for the Transaction Pending bit to clear */
2864	for (i = 0; i < 4; i++) {
2865		if (i)
2866			msleep((1 << (i - 1)) * 100);
2867
2868		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
2869		if (!(status & PCI_AF_STATUS_TP))
2870			goto clear;
2871	}
2872
2873	dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
2875
2876clear:
2877	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
2878	msleep(100);
2879
2880	return 0;
2881}
2882
2883/**
2884 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
2885 * @dev: Device to reset.
2886 * @probe: If set, only check if the device can be reset this way.
2887 *
2888 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
2889 * unset, it will be reinitialized internally when going from PCI_D3hot to
2890 * PCI_D0.  If that's the case and the device is not in a low-power state
2891 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
2892 *
2893 * NOTE: This causes the caller to sleep for twice the device power transition
2894 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
2895 * by default (i.e. unless the @dev's d3_delay field has a different value).
2896 * Moreover, only devices in D0 can be reset by this function.
2897 */
2898static int pci_pm_reset(struct pci_dev *dev, int probe)
2899{
2900	u16 csr;
2901
2902	if (!dev->pm_cap)
2903		return -ENOTTY;
2904
2905	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
2906	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
2907		return -ENOTTY;
2908
2909	if (probe)
2910		return 0;
2911
2912	if (dev->current_state != PCI_D0)
2913		return -EINVAL;
2914
2915	csr &= ~PCI_PM_CTRL_STATE_MASK;
2916	csr |= PCI_D3hot;
2917	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2918	pci_dev_d3_sleep(dev);
2919
2920	csr &= ~PCI_PM_CTRL_STATE_MASK;
2921	csr |= PCI_D0;
2922	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
2923	pci_dev_d3_sleep(dev);
2924
2925	return 0;
2926}
2927
2928static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
2929{
2930	u16 ctrl;
2931	struct pci_dev *pdev;
2932
2933	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
2934		return -ENOTTY;
2935
2936	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
2937		if (pdev != dev)
2938			return -ENOTTY;
2939
2940	if (probe)
2941		return 0;
2942
2943	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
2944	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
2945	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2946	msleep(100);
2947
2948	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
2949	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
2950	msleep(100);
2951
2952	return 0;
2953}
2954
2955static int pci_dev_reset(struct pci_dev *dev, int probe)
2956{
2957	int rc;
2958
2959	might_sleep();
2960
2961	if (!probe) {
2962		pci_block_user_cfg_access(dev);
2963		/* block PM suspend, driver probe, etc. */
2964		device_lock(&dev->dev);
2965	}
2966
2967	rc = pci_dev_specific_reset(dev, probe);
2968	if (rc != -ENOTTY)
2969		goto done;
2970
2971	rc = pcie_flr(dev, probe);
2972	if (rc != -ENOTTY)
2973		goto done;
2974
2975	rc = pci_af_flr(dev, probe);
2976	if (rc != -ENOTTY)
2977		goto done;
2978
2979	rc = pci_pm_reset(dev, probe);
2980	if (rc != -ENOTTY)
2981		goto done;
2982
2983	rc = pci_parent_bus_reset(dev, probe);
2984done:
2985	if (!probe) {
2986		device_unlock(&dev->dev);
2987		pci_unblock_user_cfg_access(dev);
2988	}
2989
2990	return rc;
2991}
2992
2993/**
2994 * __pci_reset_function - reset a PCI device function
2995 * @dev: PCI device to reset
2996 *
2997 * Some devices allow an individual function to be reset without affecting
2998 * other functions in the same device.  The PCI device must be responsive
2999 * to PCI config space in order to use this function.
3000 *
3001 * The device function is presumed to be unused when this function is called.
3002 * Resetting the device will make the contents of PCI configuration space
3003 * random, so any caller of this must be prepared to reinitialise the
3004 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
3005 * etc.
3006 *
3007 * Returns 0 if the device function was successfully reset or negative if the
3008 * device doesn't support resetting a single function.
3009 */
3010int __pci_reset_function(struct pci_dev *dev)
3011{
3012	return pci_dev_reset(dev, 0);
3013}
3014EXPORT_SYMBOL_GPL(__pci_reset_function);
3015
3016/**
3017 * pci_probe_reset_function - check whether the device can be safely reset
3018 * @dev: PCI device to reset
3019 *
3020 * Some devices allow an individual function to be reset without affecting
3021 * other functions in the same device.  The PCI device must be responsive
3022 * to PCI config space in order to use this function.
3023 *
3024 * Returns 0 if the device function can be reset or negative if the
3025 * device doesn't support resetting a single function.
3026 */
3027int pci_probe_reset_function(struct pci_dev *dev)
3028{
3029	return pci_dev_reset(dev, 1);
3030}
3031
3032/**
3033 * pci_reset_function - quiesce and reset a PCI device function
3034 * @dev: PCI device to reset
3035 *
3036 * Some devices allow an individual function to be reset without affecting
3037 * other functions in the same device.  The PCI device must be responsive
3038 * to PCI config space in order to use this function.
3039 *
3040 * This function does not just reset the PCI portion of a device, but
3041 * clears all the state associated with the device.  This function differs
3042 * from __pci_reset_function in that it saves and restores device state
3043 * over the reset.
3044 *
3045 * Returns 0 if the device function was successfully reset or negative if the
3046 * device doesn't support resetting a single function.
3047 */
3048int pci_reset_function(struct pci_dev *dev)
3049{
3050	int rc;
3051
3052	rc = pci_dev_reset(dev, 1);
3053	if (rc)
3054		return rc;
3055
3056	pci_save_state(dev);
3057
3058	/*
3059	 * Both INTx and MSI are disabled after the Interrupt Disable bit
3060	 * is set and the Bus Master bit is cleared.
3061	 */
3062	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3063
3064	rc = pci_dev_reset(dev, 0);
3065
3066	pci_restore_state(dev);
3067
3068	return rc;
3069}
3070EXPORT_SYMBOL_GPL(pci_reset_function);
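/*
 * Usage sketch (illustrative): device-assignment backends typically
 * reset a function before handing it to a guest, after making sure the
 * device supports one of the reset methods probed above:
 *
 *	if (pci_probe_reset_function(pdev) == 0 &&
 *	    pci_reset_function(pdev) == 0)
 *		dev_info(&pdev->dev, "function reset completed\n");
 */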
3071
3072/**
3073 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3074 * @dev: PCI device to query
3075 *
3076 * Returns mmrbc: maximum designed memory read count in bytes
3077 *    or appropriate error value.
3078 */
3079int pcix_get_max_mmrbc(struct pci_dev *dev)
3080{
3081	int cap;
3082	u32 stat;
3083
3084	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3085	if (!cap)
3086		return -EINVAL;
3087
3088	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3089		return -EINVAL;
3090
3091	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
3092}
3093EXPORT_SYMBOL(pcix_get_max_mmrbc);
3094
3095/**
3096 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3097 * @dev: PCI device to query
3098 *
3099 * Returns mmrbc: maximum memory read count in bytes
3100 *    or appropriate error value.
3101 */
3102int pcix_get_mmrbc(struct pci_dev *dev)
3103{
3104	int cap;
3105	u16 cmd;
3106
3107	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3108	if (!cap)
3109		return -EINVAL;
3110
3111	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3112		return -EINVAL;
3113
3114	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
3115}
3116EXPORT_SYMBOL(pcix_get_mmrbc);
3117
3118/**
3119 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3120 * @dev: PCI device to query
3121 * @mmrbc: maximum memory read count in bytes
3122 *    valid values are 512, 1024, 2048, 4096
3123 *
3124 * If possible, sets the maximum memory read byte count; some bridges have
3125 * errata that prevent this.
3126 */
3127int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3128{
3129	int cap;
3130	u32 stat, v, o;
3131	u16 cmd;
3132
3133	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
3134		return -EINVAL;
3135
3136	v = ffs(mmrbc) - 10;
3137
3138	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3139	if (!cap)
3140		return -EINVAL;
3141
3142	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3143		return -EINVAL;
3144
3145	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3146		return -E2BIG;
3147
3148	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3149		return -EINVAL;
3150
3151	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3152	if (o != v) {
3153		if (v > o && dev->bus &&
3154		   (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3155			return -EIO;
3156
3157		cmd &= ~PCI_X_CMD_MAX_READ;
3158		cmd |= v << 2;
3159		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3160			return -EIO;
3161	}
3162	return 0;
3163}
3164EXPORT_SYMBOL(pcix_set_mmrbc);
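/*
 * Usage sketch (illustrative): a PCI-X driver can raise the memory read
 * byte count to the device's designed maximum; pcix_set_mmrbc() fails
 * with -EIO when a bridge erratum forbids the increase:
 *
 *	int max = pcix_get_max_mmrbc(pdev);
 *
 *	if (max > 0 && max > pcix_get_mmrbc(pdev))
 *		pcix_set_mmrbc(pdev, max);
 */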
3165
3166/**
3167 * pcie_get_readrq - get PCI Express read request size
3168 * @dev: PCI device to query
3169 *
3170 * Returns maximum memory read request in bytes
3171 *    or appropriate error value.
3172 */
3173int pcie_get_readrq(struct pci_dev *dev)
3174{
3175	int ret, cap;
3176	u16 ctl;
3177
3178	cap = pci_pcie_cap(dev);
3179	if (!cap)
3180		return -EINVAL;
3181
3182	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3183	if (!ret)
3184		ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3185
3186	return ret;
3187}
3188EXPORT_SYMBOL(pcie_get_readrq);
3189
3190/**
3191 * pcie_set_readrq - set PCI Express maximum memory read request
3192 * @dev: PCI device to query
3193 * @rq: maximum memory read count in bytes
3194 *    valid values are 128, 256, 512, 1024, 2048, 4096
3195 *
3196 * If possible, sets the maximum memory read request in bytes.
3197 */
3198int pcie_set_readrq(struct pci_dev *dev, int rq)
3199{
3200	int cap, err = -EINVAL;
3201	u16 ctl, v;
3202
3203	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
3204		goto out;
3205
3206	v = (ffs(rq) - 8) << 12;
3207
3208	cap = pci_pcie_cap(dev);
3209	if (!cap)
3210		goto out;
3211
3212	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3213	if (err)
3214		goto out;
3215
3216	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3217		ctl &= ~PCI_EXP_DEVCTL_READRQ;
3218		ctl |= v;
3219		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3220	}
3221
3222out:
3223	return err;
3224}
3225EXPORT_SYMBOL(pcie_set_readrq);
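/*
 * Usage sketch (illustrative): some drivers clamp the read request size
 * to trade a little single-device throughput for fairness on a shared
 * link, e.g. limiting it to 512 bytes:
 *
 *	if (pcie_get_readrq(pdev) > 512)
 *		pcie_set_readrq(pdev, 512);
 */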
3226
3227/**
3228 * pcie_get_mps - get PCI Express maximum payload size
3229 * @dev: PCI device to query
3230 *
3231 * Returns maximum payload size in bytes
3232 *    or appropriate error value.
3233 */
3234int pcie_get_mps(struct pci_dev *dev)
3235{
3236	int ret, cap;
3237	u16 ctl;
3238
3239	cap = pci_pcie_cap(dev);
3240	if (!cap)
3241		return -EINVAL;
3242
3243	ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3244	if (!ret)
3245		ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3246
3247	return ret;
3248}
3249
3250/**
3251 * pcie_set_mps - set PCI Express maximum payload size
3252 * @dev: PCI device to query
3253 * @mps: maximum payload size in bytes
3254 *    valid values are 128, 256, 512, 1024, 2048, 4096
3255 *
3256 * If possible, sets the maximum payload size.
3257 */
3258int pcie_set_mps(struct pci_dev *dev, int mps)
3259{
3260	int cap, err = -EINVAL;
3261	u16 ctl, v;
3262
3263	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3264		goto out;
3265
3266	v = ffs(mps) - 8;
3267	if (v > dev->pcie_mpss)
3268		goto out;
3269	v <<= 5;
3270
3271	cap = pci_pcie_cap(dev);
3272	if (!cap)
3273		goto out;
3274
3275	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3276	if (err)
3277		goto out;
3278
3279	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3280		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3281		ctl |= v;
3282		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3283	}
3284out:
3285	return err;
3286}
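/*
 * Usage sketch (illustrative): bus configuration code can program the
 * largest payload a device advertises, capped at whatever the rest of
 * the hierarchy supports (a 256-byte cap is assumed here):
 *
 *	int mps = 128 << dev->pcie_mpss;
 *
 *	if (mps > 256)
 *		mps = 256;
 *	pcie_set_mps(dev, mps);
 */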
3287
3288/**
3289 * pci_select_bars - Make BAR mask from the type of resource
3290 * @dev: the PCI device for which BAR mask is made
3291 * @flags: resource type mask to be selected
3292 *
3293 * This helper routine makes a BAR mask from the given resource type.
3294 */
3295int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3296{
3297	int i, bars = 0;
3298	for (i = 0; i < PCI_NUM_RESOURCES; i++)
3299		if (pci_resource_flags(dev, i) & flags)
3300			bars |= (1 << i);
3301	return bars;
3302}
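/*
 * Usage sketch (illustrative): pci_select_bars() pairs naturally with
 * the selected-region helpers, e.g. to claim only the memory BARs of a
 * device (the "mydrv" name is hypothetical):
 *
 *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 *	int rc = pci_request_selected_regions(pdev, bars, "mydrv");
 */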
3303
3304/**
3305 * pci_resource_bar - get position of the BAR associated with a resource
3306 * @dev: the PCI device
3307 * @resno: the resource number
3308 * @type: the BAR type to be filled in
3309 *
3310 * Returns BAR position in config space, or 0 if the BAR is invalid.
3311 */
3312int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3313{
3314	int reg;
3315
3316	if (resno < PCI_ROM_RESOURCE) {
3317		*type = pci_bar_unknown;
3318		return PCI_BASE_ADDRESS_0 + 4 * resno;
3319	} else if (resno == PCI_ROM_RESOURCE) {
3320		*type = pci_bar_mem32;
3321		return dev->rom_base_reg;
3322	} else if (resno < PCI_BRIDGE_RESOURCES) {
3323		/* device specific resource */
3324		reg = pci_iov_resource_bar(dev, resno, type);
3325		if (reg)
3326			return reg;
3327	}
3328
3329	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
3330	return 0;
3331}
3332
3333/* Some architectures require additional programming to enable VGA */
3334static arch_set_vga_state_t arch_set_vga_state;
3335
3336void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3337{
3338	arch_set_vga_state = func;	/* NULL disables */
3339}
3340
3341static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3342		      unsigned int command_bits, u32 flags)
3343{
3344	if (arch_set_vga_state)
3345		return arch_set_vga_state(dev, decode, command_bits,
3346						flags);
3347	return 0;
3348}
3349
3350/**
3351 * pci_set_vga_state - set VGA decode state on device and parents if requested
3352 * @dev: the PCI device
3353 * @decode: true = enable decoding, false = disable decoding
3354 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3355 * @flags: traverse ancestors and change bridges
3356 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
3357 */
3358int pci_set_vga_state(struct pci_dev *dev, bool decode,
3359		      unsigned int command_bits, u32 flags)
3360{
3361	struct pci_bus *bus;
3362	struct pci_dev *bridge;
3363	u16 cmd;
3364	int rc;
3365
3366	WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
3367
3368	/* ARCH specific VGA enables */
3369	rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
3370	if (rc)
3371		return rc;
3372
3373	if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3374		pci_read_config_word(dev, PCI_COMMAND, &cmd);
3375		if (decode)
3376			cmd |= command_bits;
3377		else
3378			cmd &= ~command_bits;
3379		pci_write_config_word(dev, PCI_COMMAND, cmd);
3380	}
3381
3382	if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
3383		return 0;
3384
3385	bus = dev->bus;
3386	while (bus) {
3387		bridge = bus->self;
3388		if (bridge) {
3389			pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3390					     &cmd);
3391			if (decode)
3392				cmd |= PCI_BRIDGE_CTL_VGA;
3393			else
3394				cmd &= ~PCI_BRIDGE_CTL_VGA;
3395			pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3396					      cmd);
3397		}
3398		bus = bus->parent;
3399	}
3400	return 0;
3401}
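/*
 * Usage sketch (illustrative): a GPU driver or VGA arbiter taking over
 * the legacy VGA ranges enables decoding on the device and on every
 * bridge above it:
 *
 *	pci_set_vga_state(pdev, true,
 *			  PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
 *			  PCI_VGA_STATE_CHANGE_DECODES |
 *			  PCI_VGA_STATE_CHANGE_BRIDGE);
 */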
3402
3403#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
3404static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
3405static DEFINE_SPINLOCK(resource_alignment_lock);
3406
3407/**
3408 * pci_specified_resource_alignment - get resource alignment specified by user.
3409 * @dev: the PCI device to query
3410 *
3411 * RETURNS: Resource alignment if it is specified.
3412 *          Zero if it is not specified.
3413 */
3414resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
3415{
3416	int seg, bus, slot, func, align_order, count;
3417	resource_size_t align = 0;
3418	char *p;
3419
3420	spin_lock(&resource_alignment_lock);
3421	p = resource_alignment_param;
3422	while (*p) {
3423		count = 0;
3424		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
3425							p[count] == '@') {
3426			p += count + 1;
3427		} else {
3428			align_order = -1;
3429		}
3430		if (sscanf(p, "%x:%x:%x.%x%n",
3431			&seg, &bus, &slot, &func, &count) != 4) {
3432			seg = 0;
3433			if (sscanf(p, "%x:%x.%x%n",
3434					&bus, &slot, &func, &count) != 3) {
3435				/* Invalid format */
3436				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
3437					p);
3438				break;
3439			}
3440		}
3441		p += count;
3442		if (seg == pci_domain_nr(dev->bus) &&
3443			bus == dev->bus->number &&
3444			slot == PCI_SLOT(dev->devfn) &&
3445			func == PCI_FUNC(dev->devfn)) {
3446			if (align_order == -1) {
3447				align = PAGE_SIZE;
3448			} else {
3449				align = 1 << align_order;
3450			}
3451			/* Found */
3452			break;
3453		}
3454		if (*p != ';' && *p != ',') {
3455			/* End of param or invalid format */
3456			break;
3457		}
3458		p++;
3459	}
3460	spin_unlock(&resource_alignment_lock);
3461	return align;
3462}
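/*
 * Example (kernel command line, illustrative): the parser above accepts
 * an optional alignment order followed by '@' and one or more
 * [<domain>:]<bus>:<slot>.<func> entries separated by ';' or ','.  For
 * instance, to align the BARs of 0000:01:02.0 to 1 MiB (order 20) and
 * those of 02:00.0 to PAGE_SIZE:
 *
 *	pci=resource_alignment=20@0000:01:02.0;02:00.0
 */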
3463
3464/**
3465 * pci_is_reassigndev - check if the specified PCI device is a reassignment target
3466 * @dev: the PCI device to check
3467 *
3468 * RETURNS: non-zero if the PCI device is a target device to reassign,
3469 *          zero otherwise.
3470 */
3471int pci_is_reassigndev(struct pci_dev *dev)
3472{
3473	return (pci_specified_resource_alignment(dev) != 0);
3474}
3475
3476ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3477{
3478	if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3479		count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3480	spin_lock(&resource_alignment_lock);
3481	strncpy(resource_alignment_param, buf, count);
3482	resource_alignment_param[count] = '\0';
3483	spin_unlock(&resource_alignment_lock);
3484	return count;
3485}
3486
3487ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3488{
3489	size_t count;
3490	spin_lock(&resource_alignment_lock);
3491	count = snprintf(buf, size, "%s", resource_alignment_param);
3492	spin_unlock(&resource_alignment_lock);
3493	return count;
3494}
3495
3496static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3497{
3498	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3499}
3500
3501static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3502					const char *buf, size_t count)
3503{
3504	return pci_set_resource_alignment_param(buf, count);
3505}
3506
3507BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3508					pci_resource_alignment_store);
3509
3510static int __init pci_resource_alignment_sysfs_init(void)
3511{
3512	return bus_create_file(&pci_bus_type,
3513					&bus_attr_resource_alignment);
3514}
3515
3516late_initcall(pci_resource_alignment_sysfs_init);
3517
3518static void __devinit pci_no_domains(void)
3519{
3520#ifdef CONFIG_PCI_DOMAINS
3521	pci_domains_supported = 0;
3522#endif
3523}
3524
3525/**
3526 * pci_ext_cfg_avail - can we access extended PCI config space?
3527 * @dev: The PCI device of the root bridge.
3528 *
3529 * Returns 1 if we can access PCI extended config space (offsets
3530 * greater than 0xff). This is the default implementation. Architecture
3531 * implementations can override this.
3532 */
3533int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3534{
3535	return 1;
3536}
3537
3538void __weak pci_fixup_cardbus(struct pci_bus *bus)
3539{
3540}
3541EXPORT_SYMBOL(pci_fixup_cardbus);
3542
3543static int __init pci_setup(char *str)
3544{
3545	while (str) {
3546		char *k = strchr(str, ',');
3547		if (k)
3548			*k++ = 0;
3549		if (*str && (str = pcibios_setup(str)) && *str) {
3550			if (!strcmp(str, "nomsi")) {
3551				pci_no_msi();
3552			} else if (!strcmp(str, "noaer")) {
3553				pci_no_aer();
3554			} else if (!strncmp(str, "realloc", 7)) {
3555				pci_realloc();
3556			} else if (!strcmp(str, "nodomains")) {
3557				pci_no_domains();
3558			} else if (!strncmp(str, "cbiosize=", 9)) {
3559				pci_cardbus_io_size = memparse(str + 9, &str);
3560			} else if (!strncmp(str, "cbmemsize=", 10)) {
3561				pci_cardbus_mem_size = memparse(str + 10, &str);
3562			} else if (!strncmp(str, "resource_alignment=", 19)) {
3563				pci_set_resource_alignment_param(str + 19,
3564							strlen(str + 19));
3565			} else if (!strncmp(str, "ecrc=", 5)) {
3566				pcie_ecrc_get_policy(str + 5);
3567			} else if (!strncmp(str, "hpiosize=", 9)) {
3568				pci_hotplug_io_size = memparse(str + 9, &str);
3569			} else if (!strncmp(str, "hpmemsize=", 10)) {
3570				pci_hotplug_mem_size = memparse(str + 10, &str);
3571			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
3572				pcie_bus_config = PCIE_BUS_TUNE_OFF;
3573			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
3574				pcie_bus_config = PCIE_BUS_SAFE;
3575			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
3576				pcie_bus_config = PCIE_BUS_PERFORMANCE;
3577			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
3578				pcie_bus_config = PCIE_BUS_PEER2PEER;
3579			} else {
3580				printk(KERN_ERR "PCI: Unknown option `%s'\n",
3581						str);
3582			}
3583		}
3584		str = k;
3585	}
3586	return 0;
3587}
3588early_param("pci", pci_setup);
3589
3590EXPORT_SYMBOL(pci_reenable_device);
3591EXPORT_SYMBOL(pci_enable_device_io);
3592EXPORT_SYMBOL(pci_enable_device_mem);
3593EXPORT_SYMBOL(pci_enable_device);
3594EXPORT_SYMBOL(pcim_enable_device);
3595EXPORT_SYMBOL(pcim_pin_device);
3596EXPORT_SYMBOL(pci_disable_device);
3597EXPORT_SYMBOL(pci_find_capability);
3598EXPORT_SYMBOL(pci_bus_find_capability);
3599EXPORT_SYMBOL(pci_release_regions);
3600EXPORT_SYMBOL(pci_request_regions);
3601EXPORT_SYMBOL(pci_request_regions_exclusive);
3602EXPORT_SYMBOL(pci_release_region);
3603EXPORT_SYMBOL(pci_request_region);
3604EXPORT_SYMBOL(pci_request_region_exclusive);
3605EXPORT_SYMBOL(pci_release_selected_regions);
3606EXPORT_SYMBOL(pci_request_selected_regions);
3607EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
3608EXPORT_SYMBOL(pci_set_master);
3609EXPORT_SYMBOL(pci_clear_master);
3610EXPORT_SYMBOL(pci_set_mwi);
3611EXPORT_SYMBOL(pci_try_set_mwi);
3612EXPORT_SYMBOL(pci_clear_mwi);
3613EXPORT_SYMBOL_GPL(pci_intx);
3614EXPORT_SYMBOL(pci_assign_resource);
3615EXPORT_SYMBOL(pci_find_parent_resource);
3616EXPORT_SYMBOL(pci_select_bars);
3617
3618EXPORT_SYMBOL(pci_set_power_state);
3619EXPORT_SYMBOL(pci_save_state);
3620EXPORT_SYMBOL(pci_restore_state);
3621EXPORT_SYMBOL(pci_pme_capable);
3622EXPORT_SYMBOL(pci_pme_active);
3623EXPORT_SYMBOL(pci_wake_from_d3);
3624EXPORT_SYMBOL(pci_target_state);
3625EXPORT_SYMBOL(pci_prepare_to_sleep);
3626EXPORT_SYMBOL(pci_back_from_sleep);
3627EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);