// SPDX-License-Identifier: GPL-2.0
/*
 * Volume Management Device driver
 * Copyright (c) 2015, Intel Corporation.
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/pci-ecam.h>
#include <linux/srcu.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>

#include <asm/irqdomain.h>
#include <asm/device.h>
#include <asm/msi.h>

#define VMD_CFGBAR	0
#define VMD_MEMBAR1	2
#define VMD_MEMBAR2	4

#define PCI_REG_VMCAP		0x40
#define BUS_RESTRICT_CAP(vmcap)	(vmcap & 0x1)
#define PCI_REG_VMCONFIG	0x44
#define BUS_RESTRICT_CFG(vmcfg)	((vmcfg >> 8) & 0x3)
#define VMCONFIG_MSI_REMAP	0x2
#define PCI_REG_VMLOCK		0x70
#define MB2_SHADOW_EN(vmlock)	(vmlock & 0x2)

#define MB2_SHADOW_OFFSET	0x2000
#define MB2_SHADOW_SIZE		16

enum vmd_features {
	/*
	 * Device may contain registers which hint the physical location of the
	 * membars, in order to allow proper address translation during
	 * resource assignment to enable guest virtualization
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW		= (1 << 0),

	/*
	 * Device may provide root port configuration information which limits
	 * bus numbering
	 */
	VMD_FEAT_HAS_BUS_RESTRICTIONS		= (1 << 1),

	/*
	 * Device contains physical location shadow registers in
	 * vendor-specific capability space
	 */
	VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP	= (1 << 2),

	/*
	 * Device may use MSI-X vector 0 for software triggering and will not
	 * be used for MSI remapping
	 */
	VMD_FEAT_OFFSET_FIRST_VECTOR		= (1 << 3),

	/*
	 * Device can bypass remapping MSI-X transactions into its MSI-X table,
	 * avoiding the requirement of a VMD MSI domain for child device
	 * interrupt handling.
	 */
	VMD_FEAT_CAN_BYPASS_MSI_REMAP		= (1 << 4),
};

/*
 * Lock for manipulating VMD IRQ lists.
 */
static DEFINE_RAW_SPINLOCK(list_lock);

/**
 * struct vmd_irq - private data to map driver IRQ to the VMD shared vector
 * @node:	list item for parent traversal.
 * @irq:	back pointer to parent.
 * @enabled:	true if driver enabled IRQ
 * @virq:	the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to
 * a VMD IRQ using this structure.
 */
struct vmd_irq {
	struct list_head	node;
	struct vmd_irq_list	*irq;
	bool			enabled;
	unsigned int		virq;
};

/**
 * struct vmd_irq_list - list of driver requested IRQs mapping to a VMD vector
 * @irq_list:	the list of irq's the VMD one demuxes to.
 * @srcu:	SRCU struct for local synchronization.
 * @count:	number of child IRQs assigned to this vector; used to track
 *		sharing.
 */
struct vmd_irq_list {
	struct list_head	irq_list;
	struct srcu_struct	srcu;
	unsigned int		count;
};

struct vmd_dev {
	struct pci_dev		*dev;

	spinlock_t		cfg_lock;
	void __iomem		*cfgbar;

	int			msix_count;
	struct vmd_irq_list	*irqs;

	struct pci_sysdata	sysdata;
	struct resource		resources[3];
	struct irq_domain	*irq_domain;
	struct pci_bus		*bus;
	u8			busn_start;
	u8			first_vec;
};

static inline struct vmd_dev *vmd_from_bus(struct pci_bus *bus)
{
	return container_of(bus->sysdata, struct vmd_dev, sysdata);
}

static inline unsigned int index_from_irqs(struct vmd_dev *vmd,
					   struct vmd_irq_list *irqs)
{
	return irqs - vmd->irqs;
}

/*
 * Drivers managing a device in a VMD domain allocate their own IRQs as before,
 * but the MSI entry for the hardware it's driving will be programmed with a
 * destination ID for the VMD MSI-X table.  The VMD muxes interrupts in its
 * domain into one of its own, and the VMD driver de-muxes these for the
 * handlers sharing that VMD IRQ.  The vmd irq_domain provides the operations
 * and irq_chip to set this up.
 */
static void vmd_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct vmd_irq *vmdirq = data->chip_data;
	struct vmd_irq_list *irq = vmdirq->irq;
	struct vmd_dev *vmd = irq_data_get_irq_handler_data(data);

	memset(msg, 0, sizeof(*msg));
	msg->address_hi = X86_MSI_BASE_ADDRESS_HIGH;
	msg->arch_addr_lo.base_address = X86_MSI_BASE_ADDRESS_LOW;
	msg->arch_addr_lo.destid_0_7 = index_from_irqs(vmd, irq);
}

/*
 * We rely on MSI_FLAG_USE_DEF_CHIP_OPS to set the IRQ mask/unmask ops.
 */
static void vmd_irq_enable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);
}

static void vmd_irq_disable(struct irq_data *data)
{
	struct vmd_irq *vmdirq = data->chip_data;
	unsigned long flags;

	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}

/*
 * XXX: Stubbed until we develop acceptable way to not create conflicts with
 * other devices sharing the same vector.
 */
static int vmd_irq_set_affinity(struct irq_data *data,
				const struct cpumask *dest, bool force)
{
	return -EINVAL;
}

static struct irq_chip vmd_msi_controller = {
	.name			= "VMD-MSI",
	.irq_enable		= vmd_irq_enable,
	.irq_disable		= vmd_irq_disable,
	.irq_compose_msi_msg	= vmd_compose_msi_msg,
	.irq_set_affinity	= vmd_irq_set_affinity,
};

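/*
 * The hwirq returned here is a placeholder; vmd_msi_init() later installs
 * the actual VMD vector as the hwirq via irq_domain_set_info().
 */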
static irq_hw_number_t vmd_get_hwirq(struct msi_domain_info *info,
				     msi_alloc_info_t *arg)
{
	return 0;
}

/*
 * XXX: We can be even smarter selecting the best IRQ once we solve the
 * affinity problem.
 */
static struct vmd_irq_list *vmd_next_irq(struct vmd_dev *vmd, struct msi_desc *desc)
{
	unsigned long flags;
	int i, best;

	if (vmd->msix_count == 1 + vmd->first_vec)
		return &vmd->irqs[vmd->first_vec];

	/*
	 * White list for fast-interrupt handlers. All others will share the
	 * "slow" interrupt vector.
	 */
	switch (msi_desc_to_pci_dev(desc)->class) {
	case PCI_CLASS_STORAGE_EXPRESS:
		break;
	default:
		return &vmd->irqs[vmd->first_vec];
	}

	raw_spin_lock_irqsave(&list_lock, flags);
	best = vmd->first_vec + 1;
	for (i = best; i < vmd->msix_count; i++)
		if (vmd->irqs[i].count < vmd->irqs[best].count)
			best = i;
	vmd->irqs[best].count++;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	return &vmd->irqs[best];
}

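/*
 * Allocate the per-IRQ bookkeeping, bind the new virq to the least-loaded
 * VMD vector and install the untracked flow handler. The matching teardown
 * is vmd_msi_free().
 */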
static int vmd_msi_init(struct irq_domain *domain, struct msi_domain_info *info,
			unsigned int virq, irq_hw_number_t hwirq,
			msi_alloc_info_t *arg)
{
	struct msi_desc *desc = arg->desc;
	struct vmd_dev *vmd = vmd_from_bus(msi_desc_to_pci_dev(desc)->bus);
	struct vmd_irq *vmdirq = kzalloc(sizeof(*vmdirq), GFP_KERNEL);
	unsigned int index, vector;

	if (!vmdirq)
		return -ENOMEM;

	INIT_LIST_HEAD(&vmdirq->node);
	vmdirq->irq = vmd_next_irq(vmd, desc);
	vmdirq->virq = virq;
	index = index_from_irqs(vmd, vmdirq->irq);
	vector = pci_irq_vector(vmd->dev, index);

	irq_domain_set_info(domain, virq, vector, info->chip, vmdirq,
			    handle_untracked_irq, vmd, NULL);
	return 0;
}

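/*
 * Wait for any in-flight demux of this IRQ (vmd_irq() readers) to finish
 * before dropping the vector's share count and freeing the entry.
 */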
static void vmd_msi_free(struct irq_domain *domain,
			 struct msi_domain_info *info, unsigned int virq)
{
	struct vmd_irq *vmdirq = irq_get_chip_data(virq);
	unsigned long flags;

	synchronize_srcu(&vmdirq->irq->srcu);

	/* XXX: Potential optimization to rebalance */
	raw_spin_lock_irqsave(&list_lock, flags);
	vmdirq->irq->count--;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	kfree(vmdirq);
}

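/*
 * Cap the requested vector count at what the VMD device provides; a
 * positive return value lets the MSI core retry with fewer vectors.
 */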
static int vmd_msi_prepare(struct irq_domain *domain, struct device *dev,
			   int nvec, msi_alloc_info_t *arg)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = vmd_from_bus(pdev->bus);

	if (nvec > vmd->msix_count)
		return vmd->msix_count;

	memset(arg, 0, sizeof(*arg));
	return 0;
}

static void vmd_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
}

static struct msi_domain_ops vmd_msi_domain_ops = {
	.get_hwirq	= vmd_get_hwirq,
	.msi_init	= vmd_msi_init,
	.msi_free	= vmd_msi_free,
	.msi_prepare	= vmd_msi_prepare,
	.set_desc	= vmd_set_desc,
};

static struct msi_domain_info vmd_msi_domain_info = {
	.flags		= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
			  MSI_FLAG_PCI_MSIX,
	.ops		= &vmd_msi_domain_ops,
	.chip		= &vmd_msi_controller,
};

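/*
 * Note the inverted sense: setting VMCONFIG_MSI_REMAP in VMCONFIG
 * *disables* remapping, so enabling remapping means clearing the bit.
 */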
static void vmd_set_msi_remapping(struct vmd_dev *vmd, bool enable)
{
	u16 reg;

	pci_read_config_word(vmd->dev, PCI_REG_VMCONFIG, &reg);
	reg = enable ? (reg & ~VMCONFIG_MSI_REMAP) :
		       (reg | VMCONFIG_MSI_REMAP);
	pci_write_config_word(vmd->dev, PCI_REG_VMCONFIG, reg);
}

static int vmd_create_irq_domain(struct vmd_dev *vmd)
{
	struct fwnode_handle *fn;

	fn = irq_domain_alloc_named_id_fwnode("VMD-MSI", vmd->sysdata.domain);
	if (!fn)
		return -ENODEV;

	vmd->irq_domain = pci_msi_create_irq_domain(fn, &vmd_msi_domain_info, NULL);
	if (!vmd->irq_domain) {
		irq_domain_free_fwnode(fn);
		return -ENODEV;
	}

	return 0;
}

static void vmd_remove_irq_domain(struct vmd_dev *vmd)
{
	/*
	 * Some production BIOS won't enable remapping between soft reboots.
	 * Ensure remapping is restored before unloading the driver.
	 */
	if (!vmd->msix_count)
		vmd_set_msi_remapping(vmd, true);

	if (vmd->irq_domain) {
		struct fwnode_handle *fn = vmd->irq_domain->fwnode;

		irq_domain_remove(vmd->irq_domain);
		irq_domain_free_fwnode(fn);
	}
}

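/*
 * Map a (bus, devfn, reg) tuple to an address inside the CFGBAR using the
 * standard ECAM layout, with bus numbers rebased to busn_start. Accesses
 * that would run past the end of the BAR return NULL.
 */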
static void __iomem *vmd_cfg_addr(struct vmd_dev *vmd, struct pci_bus *bus,
				  unsigned int devfn, int reg, int len)
{
	unsigned int busnr_ecam = bus->number - vmd->busn_start;
	u32 offset = PCIE_ECAM_OFFSET(busnr_ecam, devfn, reg);

	if (offset + len >= resource_size(&vmd->dev->resource[VMD_CFGBAR]))
		return NULL;

	return vmd->cfgbar + offset;
}

/*
 * CPU may deadlock if config space is not serialized on some versions of this
 * hardware, so all config space access is done under a spinlock.
 */
static int vmd_pci_read(struct pci_bus *bus, unsigned int devfn, int reg,
			int len, u32 *value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		*value = readb(addr);
		break;
	case 2:
		*value = readw(addr);
		break;
	case 4:
		*value = readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

/*
 * VMD h/w converts non-posted config writes to posted memory writes. The
 * read-back in this function forces the completion so it returns only after
 * the config space was written, as expected.
 */
static int vmd_pci_write(struct pci_bus *bus, unsigned int devfn, int reg,
			 int len, u32 value)
{
	struct vmd_dev *vmd = vmd_from_bus(bus);
	void __iomem *addr = vmd_cfg_addr(vmd, bus, devfn, reg, len);
	unsigned long flags;
	int ret = 0;

	if (!addr)
		return -EFAULT;

	spin_lock_irqsave(&vmd->cfg_lock, flags);
	switch (len) {
	case 1:
		writeb(value, addr);
		readb(addr);
		break;
	case 2:
		writew(value, addr);
		readw(addr);
		break;
	case 4:
		writel(value, addr);
		readl(addr);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	spin_unlock_irqrestore(&vmd->cfg_lock, flags);
	return ret;
}

static struct pci_ops vmd_ops = {
	.read		= vmd_pci_read,
	.write		= vmd_pci_write,
};

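/*
 * Expose the root bus windows as children of the VMD MEMBARs in the
 * resource tree, so the guest domain's usage shows up under the VMD device.
 */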
static void vmd_attach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = &vmd->resources[1];
	vmd->dev->resource[VMD_MEMBAR2].child = &vmd->resources[2];
}

static void vmd_detach_resources(struct vmd_dev *vmd)
{
	vmd->dev->resource[VMD_MEMBAR1].child = NULL;
	vmd->dev->resource[VMD_MEMBAR2].child = NULL;
}

/*
 * VMD domains start at 0x10000 to not clash with ACPI _SEG domains.
 * Per ACPI r6.0, sec 6.5.6, _SEG returns an integer, of which the lower
 * 16 bits are the PCI Segment Group (domain) number.  Other bits are
 * currently reserved.
 */
static int vmd_find_free_domain(void)
{
	int domain = 0xffff;
	struct pci_bus *bus = NULL;

	while ((bus = pci_find_next_bus(bus)) != NULL)
		domain = max_t(int, domain, pci_domain_nr(bus));
	return domain + 1;
}

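/*
 * Read the host physical base of each MEMBAR from the shadow registers and
 * convert it into an offset to apply to child resources. Two layouts exist:
 * native hardware shadows the values at MB2_SHADOW_OFFSET inside MEMBAR2,
 * while hypervisors emulate a vendor-specific capability tagged "SHDW".
 * A zero offset (no shadow enabled) is not an error.
 */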
static int vmd_get_phys_offsets(struct vmd_dev *vmd, bool native_hint,
				resource_size_t *offset1,
				resource_size_t *offset2)
{
	struct pci_dev *dev = vmd->dev;
	u64 phys1, phys2;

	if (native_hint) {
		u32 vmlock;
		int ret;

		ret = pci_read_config_dword(dev, PCI_REG_VMLOCK, &vmlock);
		if (ret || vmlock == ~0)
			return -ENODEV;

		if (MB2_SHADOW_EN(vmlock)) {
			void __iomem *membar2;

			membar2 = pci_iomap(dev, VMD_MEMBAR2, 0);
			if (!membar2)
				return -ENOMEM;
			phys1 = readq(membar2 + MB2_SHADOW_OFFSET);
			phys2 = readq(membar2 + MB2_SHADOW_OFFSET + 8);
			pci_iounmap(dev, membar2);
		} else
			return 0;
	} else {
		/* Hypervisor-Emulated Vendor-Specific Capability */
		int pos = pci_find_capability(dev, PCI_CAP_ID_VNDR);
		u32 reg, regu;

		pci_read_config_dword(dev, pos + 4, &reg);

		/* "SHDW" */
		if (pos && reg == 0x53484457) {
			pci_read_config_dword(dev, pos + 8, &reg);
			pci_read_config_dword(dev, pos + 12, &regu);
			phys1 = (u64) regu << 32 | reg;

			pci_read_config_dword(dev, pos + 16, &reg);
			pci_read_config_dword(dev, pos + 20, &regu);
			phys2 = (u64) regu << 32 | reg;
		} else
			return 0;
	}

	*offset1 = dev->resource[VMD_MEMBAR1].start -
			(phys1 & PCI_BASE_ADDRESS_MEM_MASK);
	*offset2 = dev->resource[VMD_MEMBAR2].start -
			(phys2 & PCI_BASE_ADDRESS_MEM_MASK);

	return 0;
}

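/*
 * If the device reports bus restrictions in VMCAP, decode the two-bit bus
 * offset field of VMCONFIG into the starting bus number (0, 128 or 224).
 */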
static int vmd_get_bus_number_start(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	u16 reg;

	pci_read_config_word(dev, PCI_REG_VMCAP, &reg);
	if (BUS_RESTRICT_CAP(reg)) {
		pci_read_config_word(dev, PCI_REG_VMCONFIG, &reg);

		switch (BUS_RESTRICT_CFG(reg)) {
		case 0:
			vmd->busn_start = 0;
			break;
		case 1:
			vmd->busn_start = 128;
			break;
		case 2:
			vmd->busn_start = 224;
			break;
		default:
			pci_err(dev, "Unknown Bus Offset Setting (%d)\n",
				BUS_RESTRICT_CFG(reg));
			return -ENODEV;
		}
	}

	return 0;
}

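/*
 * Top-level handler for one VMD MSI-X vector: demux to every child IRQ
 * currently on this vector's list. The walk is SRCU-protected so that
 * vmd_msi_free() can safely wait out concurrent handlers.
 */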
static irqreturn_t vmd_irq(int irq, void *data)
{
	struct vmd_irq_list *irqs = data;
	struct vmd_irq *vmdirq;
	int idx;

	idx = srcu_read_lock(&irqs->srcu);
	list_for_each_entry_rcu(vmdirq, &irqs->irq_list, node)
		generic_handle_irq(vmdirq->virq);
	srcu_read_unlock(&irqs->srcu, idx);

	return IRQ_HANDLED;
}

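/*
 * Allocate one MSI-X vector per demux list (at least first_vec + 1) and
 * attach vmd_irq() to each of them.
 */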
static int vmd_alloc_irqs(struct vmd_dev *vmd)
{
	struct pci_dev *dev = vmd->dev;
	int i, err;

	vmd->msix_count = pci_msix_vec_count(dev);
	if (vmd->msix_count < 0)
		return -ENODEV;

	vmd->msix_count = pci_alloc_irq_vectors(dev, vmd->first_vec + 1,
						vmd->msix_count, PCI_IRQ_MSIX);
	if (vmd->msix_count < 0)
		return vmd->msix_count;

	vmd->irqs = devm_kcalloc(&dev->dev, vmd->msix_count, sizeof(*vmd->irqs),
				 GFP_KERNEL);
	if (!vmd->irqs)
		return -ENOMEM;

	for (i = 0; i < vmd->msix_count; i++) {
		err = init_srcu_struct(&vmd->irqs[i].srcu);
		if (err)
			return err;

		INIT_LIST_HEAD(&vmd->irqs[i].irq_list);
		err = devm_request_irq(&dev->dev, pci_irq_vector(dev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}

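/*
 * Bring up the VMD-owned PCI domain: translate the BARs into root bus
 * resources, pick a free domain number, set up (or bypass) MSI remapping,
 * then create, scan and attach the child bus.
 */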
static int vmd_enable_domain(struct vmd_dev *vmd, unsigned long features)
{
	struct pci_sysdata *sd = &vmd->sysdata;
	struct resource *res;
	u32 upper_bits;
	unsigned long flags;
	LIST_HEAD(resources);
	resource_size_t offset[2] = {0};
	resource_size_t membar2_offset = 0x2000;
	struct pci_bus *child;
	int ret;

	/*
	 * Shadow registers may exist in certain VMD device ids which allow
	 * guests to correctly assign host physical addresses to the root ports
	 * and child devices. These registers will either return the host value
	 * or 0, depending on an enable bit in the VMD device.
	 */
	if (features & VMD_FEAT_HAS_MEMBAR_SHADOW) {
		membar2_offset = MB2_SHADOW_OFFSET + MB2_SHADOW_SIZE;
		ret = vmd_get_phys_offsets(vmd, true, &offset[0], &offset[1]);
		if (ret)
			return ret;
	} else if (features & VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP) {
		ret = vmd_get_phys_offsets(vmd, false, &offset[0], &offset[1]);
		if (ret)
			return ret;
	}

	/*
	 * Certain VMD devices may have a root port configuration option which
	 * limits the bus range to between 0-127, 128-255, or 224-255
	 */
	if (features & VMD_FEAT_HAS_BUS_RESTRICTIONS) {
		ret = vmd_get_bus_number_start(vmd);
		if (ret)
			return ret;
	}

	res = &vmd->dev->resource[VMD_CFGBAR];
	vmd->resources[0] = (struct resource) {
		.name  = "VMD CFGBAR",
		.start = vmd->busn_start,
		.end   = vmd->busn_start + (resource_size(res) >> 20) - 1,
		.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED,
	};

	/*
	 * If the window is below 4GB, clear IORESOURCE_MEM_64 so we can
	 * put 32-bit resources in the window.
	 *
	 * There's no hardware reason why a 64-bit window *couldn't*
	 * contain a 32-bit resource, but pbus_size_mem() computes the
	 * bridge window size assuming a 64-bit window will contain no
	 * 32-bit resources.  __pci_assign_resource() enforces that
	 * artificial restriction to make sure everything will fit.
	 *
	 * The only way we could use a 64-bit non-prefetchable MEMBAR is
	 * if its address is <4GB so that we can convert it to a 32-bit
	 * resource.  To be visible to the host OS, all VMD endpoints must
	 * be initially configured by platform BIOS, which includes setting
	 * up these resources.  We can assume the device is configured
	 * according to the platform needs.
	 */
	res = &vmd->dev->resource[VMD_MEMBAR1];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[1] = (struct resource) {
		.name  = "VMD MEMBAR1",
		.start = res->start,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	res = &vmd->dev->resource[VMD_MEMBAR2];
	upper_bits = upper_32_bits(res->end);
	flags = res->flags & ~IORESOURCE_SIZEALIGN;
	if (!upper_bits)
		flags &= ~IORESOURCE_MEM_64;
	vmd->resources[2] = (struct resource) {
		.name  = "VMD MEMBAR2",
		.start = res->start + membar2_offset,
		.end   = res->end,
		.flags = flags,
		.parent = res,
	};

	sd->vmd_dev = vmd->dev;
	sd->domain = vmd_find_free_domain();
	if (sd->domain < 0)
		return sd->domain;

	sd->node = pcibus_to_node(vmd->dev->bus);

	/*
	 * Currently MSI remapping must be enabled in guest passthrough mode
	 * due to some missing interrupt remapping plumbing. This is probably
	 * acceptable because the guest is usually CPU-limited and MSI
	 * remapping doesn't become a performance bottleneck.
	 */
	if (!(features & VMD_FEAT_CAN_BYPASS_MSI_REMAP) ||
	    offset[0] || offset[1]) {
		ret = vmd_alloc_irqs(vmd);
		if (ret)
			return ret;

		vmd_set_msi_remapping(vmd, true);

		ret = vmd_create_irq_domain(vmd);
		if (ret)
			return ret;

		/*
		 * Override the IRQ domain bus token so the domain can be
		 * distinguished from a regular PCI/MSI domain.
		 */
		irq_domain_update_bus_token(vmd->irq_domain, DOMAIN_BUS_VMD_MSI);
	} else {
		vmd_set_msi_remapping(vmd, false);
	}

	pci_add_resource(&resources, &vmd->resources[0]);
	pci_add_resource_offset(&resources, &vmd->resources[1], offset[0]);
	pci_add_resource_offset(&resources, &vmd->resources[2], offset[1]);

	vmd->bus = pci_create_root_bus(&vmd->dev->dev, vmd->busn_start,
				       &vmd_ops, sd, &resources);
	if (!vmd->bus) {
		pci_free_resource_list(&resources);
		vmd_remove_irq_domain(vmd);
		return -ENODEV;
	}

	vmd_attach_resources(vmd);
	if (vmd->irq_domain)
		dev_set_msi_domain(&vmd->bus->dev, vmd->irq_domain);

	pci_scan_child_bus(vmd->bus);
	pci_assign_unassigned_bus_resources(vmd->bus);

	/*
	 * VMD root buses are virtual and don't return true on pci_is_pcie()
	 * and will fail pcie_bus_configure_settings() early. It can instead be
	 * run on each of the real root ports.
	 */
	list_for_each_entry(child, &vmd->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(vmd->bus);

	WARN(sysfs_create_link(&vmd->dev->dev.kobj, &vmd->bus->dev.kobj,
			       "domain"), "Can't create symlink to domain\n");
	return 0;
}

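/*
 * The CFGBAR must cover at least one bus worth of ECAM config space:
 * 32 devices * 8 functions * 4K per function = 1MB (1 << 20).
 */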
static int vmd_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	unsigned long features = (unsigned long) id->driver_data;
	struct vmd_dev *vmd;
	int err;

	if (resource_size(&dev->resource[VMD_CFGBAR]) < (1 << 20))
		return -ENOMEM;

	vmd = devm_kzalloc(&dev->dev, sizeof(*vmd), GFP_KERNEL);
	if (!vmd)
		return -ENOMEM;

	vmd->dev = dev;
	err = pcim_enable_device(dev);
	if (err < 0)
		return err;

	vmd->cfgbar = pcim_iomap(dev, VMD_CFGBAR, 0);
	if (!vmd->cfgbar)
		return -ENOMEM;

	pci_set_master(dev);
	if (dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;

	if (features & VMD_FEAT_OFFSET_FIRST_VECTOR)
		vmd->first_vec = 1;

	spin_lock_init(&vmd->cfg_lock);
	pci_set_drvdata(dev, vmd);
	err = vmd_enable_domain(vmd, features);
	if (err)
		return err;

	dev_info(&vmd->dev->dev, "Bound to PCI domain %04x\n",
		 vmd->sysdata.domain);
	return 0;
}

static void vmd_cleanup_srcu(struct vmd_dev *vmd)
{
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		cleanup_srcu_struct(&vmd->irqs[i].srcu);
}

static void vmd_remove(struct pci_dev *dev)
{
	struct vmd_dev *vmd = pci_get_drvdata(dev);

	sysfs_remove_link(&vmd->dev->dev.kobj, "domain");
	pci_stop_root_bus(vmd->bus);
	pci_remove_root_bus(vmd->bus);
	vmd_cleanup_srcu(vmd);
	vmd_detach_resources(vmd);
	vmd_remove_irq_domain(vmd);
}

#ifdef CONFIG_PM_SLEEP
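/*
 * The demux IRQs are explicitly freed before suspend and re-requested on
 * resume; the MSI-X hardware state itself is expected to be saved and
 * restored by PCI core PM code.
 */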
static int vmd_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int i;

	for (i = 0; i < vmd->msix_count; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), &vmd->irqs[i]);

	return 0;
}

static int vmd_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct vmd_dev *vmd = pci_get_drvdata(pdev);
	int err, i;

	for (i = 0; i < vmd->msix_count; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       vmd_irq, IRQF_NO_THREAD,
				       "vmd", &vmd->irqs[i]);
		if (err)
			return err;
	}

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(vmd_dev_pm_ops, vmd_suspend, vmd_resume);

static const struct pci_device_id vmd_ids[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_201D),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_28C0),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_CAN_BYPASS_MSI_REMAP,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x467f),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x4c3d),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_VMD_9A0B),
		.driver_data = VMD_FEAT_HAS_MEMBAR_SHADOW_VSCAP |
				VMD_FEAT_HAS_BUS_RESTRICTIONS |
				VMD_FEAT_OFFSET_FIRST_VECTOR,},
	{0,}
};
MODULE_DEVICE_TABLE(pci, vmd_ids);

static struct pci_driver vmd_drv = {
	.name		= "vmd",
	.id_table	= vmd_ids,
	.probe		= vmd_probe,
	.remove		= vmd_remove,
	.driver		= {
		.pm	= &vmd_dev_pm_ops,
	},
};
module_pci_driver(vmd_drv);

MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.6");