drivers/pci/controller/vmd.c at v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define VMCONFIG_MSI_REMAP	0x2
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

#define MB2_SHADOW_OFFSET	0x2000
#define MB2_SHADOW_SIZE		16
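
/*
 * Layout note (from vmd_get_phys_offsets() below): when MB2_SHADOW_EN is
 * set, MEMBAR2 carries two 8-byte shadow registers at MB2_SHADOW_OFFSET,
 * holding the host physical base addresses of MEMBAR1 and MEMBAR2.
 */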

enum vmd_features {
	/*
	 * Device may contain registers which hint the physical location of the
	 * membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW		= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS		= (1 << 1),

	/*
	 * Device contains physical location shadow registers in
	 * vendor-specific capability space
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),

	/*
	 * Device may use MSI-X vector 0 for software triggering and will not
	 * be used for MSI remapping
	 */
	VMD_FEAT_OFFSET_FIRST_VECTOR		= (1 << 3),

	/*
	 * Device can bypass remapping MSI-X transactions into its MSI-X table,
	 * avoiding the requirement of a VMD MSI domain for child device
	 * interrupt handling.
	 */
	VMD_FEAT_CAN_BYPASS_MSI_REMAP		= (1 << 4),

	/*
	 * Enable ASPM on the PCIE root ports and set the default LTR of the
	 * storage devices on platforms where these values are not configured by
	 * BIOS. This is needed for laptops, which require these settings for
	 * proper power management of the SoC.
	 */
	VMD_FEAT_BIOS_PM_QUIRK		= (1 << 5),
};

#define VMD_BIOS_PM_QUIRK_LTR	0x1003	/* 3145728 ns */
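
/*
 * Decode of VMD_BIOS_PM_QUIRK_LTR above (illustrative): in the LTR register
 * format, bits 9:0 are the latency value and bits 12:10 the scale. 0x1003 is
 * value 3 with scale 4 (1048576 ns), i.e. 3 * 1048576 = 3145728 ns.
 */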

#define VMD_FEATS_CLIENT	(VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |	\
				 VMD_FEAT_HAS_BUS_RESTRICTIONS |	\
				 VMD_FEAT_OFFSET_FIRST_VECTOR |		\
				 VMD_FEAT_BIOS_PM_QUIRK)

static DEFINE_IDA(vmd_instance_ida);

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of irq's the VMD one demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 * @virq:	The underlying VMD Linux interrupt number
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
	unsigned int		virq;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	void __iomem		*cfgbar;

	int msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;
	u8			busn_start;
	u8			first_vec;
	char			*name;
	int			instance;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ.  The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	memset(msg, 0, sizeof(*msg));
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}
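
/*
 * All child MSIs report hwirq 0 here; the actual demultiplexing is done via
 * the per-vector vmd_irq lists rather than by hardware IRQ number.
 */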

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	unsigned long flags;
	int i, best;

	if (vmd->msix_count == 1 + vmd->first_vec)
		return &vmd->irqs[vmd->first_vec];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[vmd->first_vec];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	best = vmd->first_vec + 1;
	for (i = best; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;

	irq_domain_set_info(domain, virq, vmdirq->irq->virq, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

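/*
 * Note the inverted register sense: a set VMCONFIG_MSI_REMAP bit disables
 * MSI-X remapping, so enabling remapping below means clearing the bit.
 */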
static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
{
	u16 reg;

	pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
	reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
		       (reg | VMCONFIG_MSI_REMAP);
	pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
}

static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
	if (!vmd->irq_domain) {
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	return 0;
}

static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
	/*
	 * Some production BIOS won't enable remapping between soft reboots.
	 * Ensure remapping is restored before unloading the driver.
	 */
	if (!vmd->msix_count)
		vmd_set_msi_remapping(vmd, true);

	if (vmd->irq_domain) {
		struct fwnode_handle *fn = vmd->irq_domain->fwnode;

		irq_domain_remove(vmd->irq_domain);
		irq_domain_free_fwnode(fn);
	}
}

static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	unsigned int busnr_ecam = bus->number - vmd->busn_start;
	u32 offset = PCIE_ECAM_OFFSET(busnr_ecam, devfn, reg);

	if (offset + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return vmd->cfgbar + offset;
}
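
/*
 * Worked example (illustrative): ECAM allots 1 MB per bus and 4 KB per
 * function, so relative bus 1, devfn PCI_DEVFN(0, 0), reg 0 decodes to
 * offset (1 << 20) = 0x100000 within CFGBAR.
 */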

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

#ifdef CONFIG_ACPI
static struct acpi_device *vmd_acpi_find_companion(struct pci_dev *pci_dev)
{
	struct pci_host_bridge *bridge;
	u32 busnr, addr;

	if (pci_dev->bus->ops != &vmd_ops)
		return NULL;

	bridge = pci_find_host_bridge(pci_dev->bus);
	busnr = pci_dev->bus->number - bridge->bus->number;
	/*
	 * The address computation below is only applicable to relative bus
	 * numbers below 32.
	 */
	if (busnr > 31)
		return NULL;

	addr = (busnr << 24) | ((u32)pci_dev->devfn << 16) | 0x8000FFFFU;

	dev_dbg(&pci_dev->dev, "Looking for ACPI companion (address 0x%x)\n",
		addr);

	return acpi_find_child_device(ACPI_COMPANION(bridge->dev.parent), addr,
				      false);
}
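
/*
 * Example decode (illustrative): relative bus 1, devfn PCI_DEVFN(0, 0)
 * yields addr = (1 << 24) | 0x8000FFFF = 0x8100FFFF, the address value
 * matched against the bridge's ACPI child devices.
 */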

static bool hook_installed;

static void vmd_acpi_begin(void)
{
	if (pci_acpi_set_companion_lookup_hook(vmd_acpi_find_companion))
		return;

	hook_installed = true;
}

static void vmd_acpi_end(void)
{
	if (!hook_installed)
		return;

	pci_acpi_clear_companion_lookup_hook();
	hook_installed = false;
}
#else
static inline void vmd_acpi_begin(void) { }
static inline void vmd_acpi_end(void) { }
#endif /* CONFIG_ACPI */

static void vmd_domain_reset(struct vmd_dev *vmd)
{
	u16 bus, max_buses = resource_size(&vmd->resources[0]);
	u8 dev, functions, fn, hdr_type;
	char __iomem *base;

	for (bus = 0; bus < max_buses; bus++) {
		for (dev = 0; dev < 32; dev++) {
			base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
						PCI_DEVFN(dev, 0), 0);

			hdr_type = readb(base + PCI_HEADER_TYPE);

			functions = (hdr_type & PCI_HEADER_TYPE_MFD) ? 8 : 1;
			for (fn = 0; fn < functions; fn++) {
				base = vmd->cfgbar + PCIE_ECAM_OFFSET(bus,
						PCI_DEVFN(dev, fn), 0);

				hdr_type = readb(base + PCI_HEADER_TYPE) &
						PCI_HEADER_TYPE_MASK;

				if (hdr_type != PCI_HEADER_TYPE_BRIDGE ||
				    (readw(base + PCI_CLASS_DEVICE) !=
				     PCI_CLASS_BRIDGE_PCI))
					continue;

				/*
				 * Temporarily disable the I/O range before updating
				 * PCI_IO_BASE.
				 */
				writel(0x0000ffff, base + PCI_IO_BASE_UPPER16);
				/* Update lower 16 bits of I/O base/limit */
				writew(0x00f0, base + PCI_IO_BASE);
				/* Update upper 16 bits of I/O base/limit */
				writel(0, base + PCI_IO_BASE_UPPER16);

				/* MMIO Base/Limit */
				writel(0x0000fff0, base + PCI_MEMORY_BASE);

				/* Prefetchable MMIO Base/Limit */
				writel(0, base + PCI_PREF_LIMIT_UPPER32);
				writel(0x0000fff0, base + PCI_PREF_MEMORY_BASE);
				writel(0xffffffff, base + PCI_PREF_BASE_UPPER32);
			}
		}
	}
}

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6,  _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number.  Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}
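
/*
 * E.g. on a system whose existing host bridges all use domains <= 0xffff,
 * the first VMD instance gets domain 0xffff + 1 = 0x10000; later instances
 * scan past already-registered VMD buses and take the next free number.
 */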

static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
				resource_size_t *offset1,
				resource_size_t *offset2)
{
	struct pci_dev *dev = vmd->dev;
	u64 phys1, phys2;

	if (native_hint) {
		u32 vmlock;
		int ret;

		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || PCI_POSSIBLE_ERROR(vmlock))
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
			pci_iounmap(dev, membar2);
		} else
			return 0;
	} else {
		/* Hypervisor-Emulated Vendor-Specific Capability */
		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
		u32 reg, regu;

		pci_read_config_dword(dev, pos + 4, &reg);

		/* "SHDW" */
		if (pos && reg == 0x53484457) {
			pci_read_config_dword(dev, pos + 8, &reg);
			pci_read_config_dword(dev, pos + 12, &regu);
			phys1 = (u64) regu << 32 | reg;

			pci_read_config_dword(dev, pos + 16, &reg);
			pci_read_config_dword(dev, pos + 20, &regu);
			phys2 = (u64) regu << 32 | reg;
		} else
			return 0;
	}

	*offset1 = dev->resource[VMD_MEMBAR1].start -
			(phys1 & PCI_BASE_ADDRESS_MEM_MASK);
	*offset2 = dev->resource[VMD_MEMBAR2].start -
			(phys2 & PCI_BASE_ADDRESS_MEM_MASK);

	return 0;
}

static int vmd_get_bus_number_start(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	u16 reg;

	pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
	if (BUS_RESTRICT_CAP(reg)) {
		pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);

		switch (BUS_RESTRICT_CFG(reg)) {
		case 0:
			vmd->busn_start = 0;
			break;
		case 1:
			vmd->busn_start = 128;
			break;
		case 2:
			vmd->busn_start = 224;
			break;
		default:
			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
				BUS_RESTRICT_CFG(reg));
			return -ENODEV;
		}
	}

	return 0;
}

static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

static int vmd_alloc_irqs(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	int i, err;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, vmd->first_vec + 1,
						vmd->msix_count, PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		vmd->irqs[i].virq = pci_irq_vector(dev, i);
		err = devm_request_irq(&dev->dev, vmd->irqs[i].virq,
				       vmd_irq, IRQF_NO_THREAD,
				       vmd->name, &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}

/*
 * Since VMD is an aperture to regular PCIe root ports, only allow it to
 * control features that the OS is allowed to control on the physical PCI bus.
 */
static void vmd_copy_host_bridge_flags(struct pci_host_bridge *root_bridge,
				       struct pci_host_bridge *vmd_bridge)
{
	vmd_bridge->native_pcie_hotplug = root_bridge->native_pcie_hotplug;
	vmd_bridge->native_shpc_hotplug = root_bridge->native_shpc_hotplug;
	vmd_bridge->native_aer = root_bridge->native_aer;
	vmd_bridge->native_pme = root_bridge->native_pme;
	vmd_bridge->native_ltr = root_bridge->native_ltr;
	vmd_bridge->native_dpc = root_bridge->native_dpc;
}

/*
 * Enable ASPM and LTR settings on devices that aren't configured by BIOS.
 */
static int vmd_pm_enable_quirk(struct pci_dev *pdev, void *userdata)
{
	unsigned long features = *(unsigned long *)userdata;
	u16 ltr = VMD_BIOS_PM_QUIRK_LTR;
	u32 ltr_reg;
	int pos;

	if (!(features & VMD_FEAT_BIOS_PM_QUIRK))
		return 0;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_LTR);
	if (!pos)
		goto out_state_change;

	/*
	 * Skip if the max snoop LTR is non-zero, indicating BIOS has set it
	 * so the LTR quirk is not needed.
	 */
	pci_read_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, &ltr_reg);
	if (!!(ltr_reg & (PCI_LTR_VALUE_MASK | PCI_LTR_SCALE_MASK)))
		goto out_state_change;

	/*
	 * Set the default values to the maximum required by the platform to
	 * allow the deepest power management savings. Write as a DWORD where
	 * the lower word is the max snoop latency and the upper word is the
	 * max non-snoop latency.
	 */
	ltr_reg = (ltr << 16) | ltr;
	pci_write_config_dword(pdev, pos + PCI_LTR_MAX_SNOOP_LAT, ltr_reg);
	pci_info(pdev, "VMD: Default LTR value set by driver\n");

out_state_change:
	/*
	 * Ensure devices are in D0 before enabling PCI-PM L1 PM Substates, per
	 * PCIe r6.0, sec 5.5.4.
	 */
	pci_set_power_state_locked(pdev, PCI_D0);
	pci_enable_link_state_locked(pdev, PCIE_LINK_STATE_ALL);
	return 0;
}

static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000;
	struct pci_bus *child;
	struct pci_dev *dev;
	int ret;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
		if (ret)
			return ret;
	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
		if (ret)
			return ret;
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to between 0-127, 128-255, or 224-255
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		ret = vmd_get_bus_number_start(vmd);
		if (ret)
			return ret;
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = vmd->busn_start,
		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources.  __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource.  To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources.  We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

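	/*
	 * resources[2] below starts membar2_offset into MEMBAR2: the first
	 * 0x2000 bytes are always skipped, and with the membar shadow
	 * feature the skip extends past the MB2_SHADOW registers as well.
	 */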
	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_dev = vmd->dev;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	/*
	 * Currently MSI remapping must be enabled in guest passthrough mode
	 * due to some missing interrupt remapping plumbing. This is probably
	 * acceptable because the guest is usually CPU-limited and MSI
	 * remapping doesn't become a performance bottleneck.
	 */
	if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
	    offset[0] || offset[1]) {
		ret = vmd_alloc_irqs(vmd);
		if (ret)
			return ret;

		vmd_set_msi_remapping(vmd, true);

		ret = vmd_create_irq_domain(vmd);
		if (ret)
			return ret;

		/*
		 * Override the IRQ domain bus token so the domain can be
		 * distinguished from a regular PCI/MSI domain.
		 */
		irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
	} else {
		vmd_set_msi_remapping(vmd, false);
	}

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
				       &vmd_ops, sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		vmd_remove_irq_domain(vmd);
		return -ENODEV;
	}

	vmd_copy_host_bridge_flags(pci_find_host_bridge(vmd->dev->bus),
				   to_pci_host_bridge(vmd->bus->bridge));

	vmd_attach_resources(vmd);
	if (vmd->irq_domain)
		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);
	else
		dev_set_msi_domain(&vmd->bus->dev,
				   dev_get_msi_domain(&vmd->dev->dev));

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");

	vmd_acpi_begin();

	pci_scan_child_bus(vmd->bus);
	vmd_domain_reset(vmd);

	/* When Intel VMD is enabled, the OS does not discover the Root Ports
	 * owned by Intel VMD within the MMCFG space. pci_reset_bus() applies
	 * a reset to the parent of the PCI device supplied as argument. This
	 * is why we pass a child device, so the reset can be triggered at
	 * the Intel bridge level and propagated to all the children in the
	 * hierarchy.
	 */
	list_for_each_entry(child, &vmd->bus->children, node) {
		if (!list_empty(&child->devices)) {
			dev = list_first_entry(&child->devices,
					       struct pci_dev, bus_list);
			ret = pci_reset_bus(dev);
			if (ret)
				pci_warn(dev, "can't reset device: %d\n", ret);

			break;
		}
	}

	pci_assign_unassigned_bus_resources(vmd->bus);

	pci_walk_bus(vmd->bus, vmd_pm_enable_quirk, &features);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	vmd_acpi_end();
	return 0;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned long features = (unsigned long) id->driver_data;
	struct vmd_dev *vmd;
	int err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	vmd->instance = ida_alloc(&vmd_instance_ida, GFP_KERNEL);
	if (vmd->instance < 0)
		return vmd->instance;

	vmd->name = devm_kasprintf(&dev->dev, GFP_KERNEL, "vmd%d",
				   vmd->instance);
	if (!vmd->name) {
		err = -ENOMEM;
		goto out_release_instance;
	}

	err = pcim_enable_device(dev);
	if (err < 0)
		goto out_release_instance;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar) {
		err = -ENOMEM;
		goto out_release_instance;
	}

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32))) {
		err = -ENODEV;
		goto out_release_instance;
	}

	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
		vmd->first_vec = 1;

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, features);
	if (err)
		goto out_release_instance;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;

 out_release_instance:
	ida_free(&vmd_instance_ida, vmd->instance);
	return err;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	pci_stop_root_bus(vmd->bus);
	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_detach_resources(vmd);
	vmd_remove_irq_domain(vmd);
	ida_free(&vmd_instance_ida, vmd->instance);
}

static void vmd_shutdown(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	vmd_remove_irq_domain(vmd);
}

#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, vmd->irqs[i].virq, &vmd->irqs[i]);

	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	vmd_set_msi_remapping(vmd, !!vmd->irq_domain);

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, vmd->irqs[i].virq,
				       vmd_irq, IRQF_NO_THREAD,
				       vmd->name, &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
	{PCI_VDEVICE(INTEL, 0x467f),
		.driver_data = VMD_FEATS_CLIENT,},
	{PCI_VDEVICE(INTEL, 0x4c3d),
		.driver_data = VMD_FEATS_CLIENT,},
	{PCI_VDEVICE(INTEL, 0xa77f),
		.driver_data = VMD_FEATS_CLIENT,},
	{PCI_VDEVICE(INTEL, 0x7d0b),
		.driver_data = VMD_FEATS_CLIENT,},
	{PCI_VDEVICE(INTEL, 0xad0b),
		.driver_data = VMD_FEATS_CLIENT,},
	{PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
		.driver_data = VMD_FEATS_CLIENT,},
	{PCI_VDEVICE(INTEL, 0xb60b),
		.driver_data = VMD_FEATS_CLIENT,},
	{PCI_VDEVICE(INTEL, 0xb06f),
		.driver_data = VMD_FEATS_CLIENT,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.shutdown	= vmd_shutdown,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Volume Management Device driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");
drivers/pci/controller/vmd.c at v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>
#include <asm/msidef.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

#define MB2_SHADOW_OFFSET	0x2000
#define MB2_SHADOW_SIZE		16

enum vmd_features {
	/*
	 * Device may contain registers which hint the physical location of the
	 * membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW		= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS		= (1 << 1),

	/*
	 * Device contains physical location shadow registers in
	 * vendor-specific capability space
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),
};

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of irq's the VMD one demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	char __iomem		*cfgbar;

	int msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;
	u8			busn_start;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ.  The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	msg->address_hi = MSI_ADDR_BASE_HI;
	msg->address_lo = MSI_ADDR_BASE_LO |
			  MSI_ADDR_DEST_ID(index_from_irqs(vmd, irq));
	msg->data = 0;
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	int i, best = 1;
	unsigned long flags;

	if (vmd->msix_count == 1)
		return &vmd->irqs[0];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[0];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	for (i = 1; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

static void vmd_msi_free(struct irq_domain *domain,
			struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

static char __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	char __iomem *addr = vmd->cfgbar +
			     ((bus->number - vmd->busn_start) << 20) +
			     (devfn << 12) + reg;

	if ((addr - vmd->cfgbar) + len >=
	    resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return addr;
}
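
/*
 * The open-coded shifts above are the ECAM layout (1 MB per bus, 4 KB per
 * function) that the v6.x driver expresses via PCIE_ECAM_OFFSET().
 */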

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	char __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6,  _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number.  Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct fwnode_handle *fn;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000;
	struct pci_bus *child;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		u32 vmlock;
		int ret;

		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
		ret = pci_read_config_dword(vmd->dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(vmd->dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
					(readq(membar2 + MB2_SHADOW_OFFSET) &
					 PCI_BASE_ADDRESS_MEM_MASK);
			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
					(readq(membar2 + MB2_SHADOW_OFFSET + 8) &
					 PCI_BASE_ADDRESS_MEM_MASK);
			pci_iounmap(vmd->dev, membar2);
		}
	}

	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
		int pos = pci_find_capability(vmd->dev, PCI_CAP_ID_VNDR);
		u32 reg, regu;

		pci_read_config_dword(vmd->dev, pos + 4, &reg);

		/* "SHDW" */
		if (pos && reg == 0x53484457) {
			pci_read_config_dword(vmd->dev, pos + 8, &reg);
			pci_read_config_dword(vmd->dev, pos + 12, &regu);
			offset[0] = vmd->dev->resource[VMD_MEMBAR1].start -
					(((u64) regu << 32 | reg) &
					 PCI_BASE_ADDRESS_MEM_MASK);

			pci_read_config_dword(vmd->dev, pos + 16, &reg);
			pci_read_config_dword(vmd->dev, pos + 20, &regu);
			offset[1] = vmd->dev->resource[VMD_MEMBAR2].start -
					(((u64) regu << 32 | reg) &
					 PCI_BASE_ADDRESS_MEM_MASK);
		}
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to between 0-127, 128-255, or 224-255
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		u16 reg16;

		pci_read_config_word(vmd->dev, PCI_REG_VMCAP, &reg16);
		if (BUS_RESTRICT_CAP(reg16)) {
			pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG,
					     &reg16);

			switch (BUS_RESTRICT_CFG(reg16)) {
			case 1:
				vmd->busn_start = 128;
				break;
			case 2:
				vmd->busn_start = 224;
				break;
			case 3:
				pci_err(vmd->dev, "Unknown Bus Offset Setting\n");
				return -ENODEV;
			default:
				break;
			}
		}
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = vmd->busn_start,
		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources.  __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource.  To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources.  We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_dev = vmd->dev;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info,
						    x86_vector_domain);
	if (!vmd->irq_domain) {
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
				       &vmd_ops, sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		irq_domain_remove(vmd->irq_domain);
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);

	pci_scan_child_bus(vmd->bus);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct vmd_dev *vmd;
	int i, err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, 1, vmd->msix_count,
					PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, (unsigned long) id->driver_data);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);
	struct fwnode_handle *fn = vmd->irq_domain->fwnode;

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_detach_resources(vmd);
	irq_domain_remove(vmd->irq_domain);
	irq_domain_free_fwnode(fn);
}

#ifdef CONFIG_PM_SLEEP
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	pci_save_state(pdev);
	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	pci_restore_state(pdev);
	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");