// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}
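
/*
 * Illustrative only (not code from this file): callers typically invoke
 * vp_synchronize_vectors() right after telling the device to stop using its
 * vectors, so that no handler can still be running when queue state is torn
 * down. A sketch, assuming the modern transport helpers:
 *
 *	vp_modern_set_status(&vp_dev->mdev, 0);	// reset the device
 *	vp_synchronize_vectors(&vp_dev->vdev);	// wait out in-flight handlers
 */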

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}
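
/*
 * How this gets wired up (a sketch, not code from this file): the
 * transport-specific setup_vq() stores the queue's doorbell address in
 * vq->priv -- the legacy I/O port or the modern notification-area offset --
 * and the virtio core reaches vp_notify() through the vq's notify callback:
 *
 *	virtqueue_kick(vq)
 *	  -> virtqueue_notify(vq)	// invokes vq->notify, i.e. vp_notify
 *	       -> iowrite16(vq->index, vq->priv)
 */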

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}
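
/*
 * For reference (from the virtio spec, not this file): the ISR byte read
 * above has bit 0 set for virtqueue activity and bit 1
 * (VIRTIO_PCI_ISR_CONFIG) set for a configuration change; reading it clears
 * both, which is why vp_interrupt() dispatches to both handlers off the one
 * saved value.
 */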

static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned int flags = PCI_IRQ_MSIX;
	unsigned int i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof(*vp_dev->msix_names),
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof(*vp_dev->msix_names),
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}
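
/*
 * Worked example (illustrative): a device with three virtqueues, all with
 * callbacks, asks for nvectors = 4 when per_vq_vectors is set -- vector 0
 * becomes "<dev>-config" and vectors 1..3 are handed out to the queues by
 * vp_find_vqs_msix() below. With per_vq_vectors clear, only 2 vectors are
 * requested: 0 for config and 1 ("<dev>-virtqueues") shared by every queue.
 */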

static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof(*info), GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}

static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	/*
	 * If the vq is in a reset state (e.g. because re-enabling it failed),
	 * info->node has already been removed from the virtqueues list, so
	 * don't delete it again; this also prevents unexpected irqs while the
	 * queue is torn down.
	 */
	if (!vq->reset) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_del(&info->node);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	}

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->is_avq(vdev, vq->index))
			continue;

		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_update_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[], vq_callback_t *callbacks[],
			    const char * const names[], bool per_vq_vectors,
			    const bool *ctx,
			    struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (names[i] && callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof(*vp_dev->msix_names),
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned int nvqs,
			    struct virtqueue *vqs[], vq_callback_t *callbacks[],
			    const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			  dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Is there an interrupt? If not give up. */
	if (!(to_vp_device(vdev)->pci_dev->irq))
		return err;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
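
/*
 * A usage sketch from a driver's perspective (the names my_cb and "requests"
 * are hypothetical): drivers do not call vp_find_vqs() directly but reach it
 * through the config-ops wrapper in the virtio core, e.g.:
 *
 *	static void my_cb(struct virtqueue *vq) { ... }
 *
 *	struct virtqueue *vq;
 *	vq_callback_t *cbs[] = { my_cb };
 *	static const char * const names[] = { "requests" };
 *
 *	err = virtio_find_vqs(vdev, 1, &vq, cbs, names, NULL);
 */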

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_update_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_and_hint(irq, mask);
		}
	}
	return 0;
}
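
/*
 * Illustrative only: a driver pinning a queue's interrupt to CPU 2 goes
 * through the config-ops wrapper, which lands here for PCI transports:
 *
 *	struct cpumask mask;
 *
 *	cpumask_clear(&mask);
 *	cpumask_set_cpu(2, &mask);
 *	virtqueue_set_affinity(vq, &mask);
 */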

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static bool vp_supports_pm_no_reset(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	u16 pmcsr;

	if (!pci_dev->pm_cap)
		return false;

	pci_read_config_word(pci_dev, pci_dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	if (PCI_POSSIBLE_ERROR(pmcsr)) {
		dev_err(dev, "Unable to query pmcsr");
		return false;
	}

	return pmcsr & PCI_PM_CTRL_NO_SOFT_RESET;
}

static int virtio_pci_suspend(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_freeze(dev);
}

static int virtio_pci_resume(struct device *dev)
{
	return vp_supports_pm_no_reset(dev) ? 0 : virtio_pci_restore(dev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	.suspend = virtio_pci_suspend,
	.resume = virtio_pci_resume,
	.freeze = virtio_pci_freeze,
	.thaw = virtio_pci_restore,
	.poweroff = virtio_pci_freeze,
	.restore = virtio_pci_restore,
};
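
/*
 * Why .suspend/.resume differ from .freeze/.restore above: hibernation
 * images may be resumed on a device that lost all of its state, so the
 * freeze/restore pairs always run the full virtio_device_freeze()/restore()
 * cycle. For suspend-to-RAM/idle, a device whose PM control register
 * advertises No_Soft_Reset keeps its state across D3, so
 * vp_supports_pm_no_reset() lets those transitions skip the heavyweight
 * path entirely.
 */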
#endif


/* Qumranet donated their vendor ID for devices 0x1000 through 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	/*
	 * Device is marked broken on surprise removal so that virtio upper
	 * layers can abort any ongoing operation.
	 */
	if (!pci_device_is_present(pci_dev))
		virtio_break_device(&vp_dev->vdev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->is_legacy)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}
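
/*
 * This hook is exercised via sysfs (illustrative path):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:00:04.0/sriov_numvfs
 *
 * enables four VFs; writing 0 disables them again. The DRIVER_OK and
 * VIRTIO_F_SR_IOV checks above mean VFs can only be toggled once the PF's
 * virtio driver has completed feature negotiation.
 */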

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

struct virtio_device *virtio_pci_vf_get_pf_dev(struct pci_dev *pdev)
{
	struct virtio_pci_device *pf_vp_dev;

	pf_vp_dev = pci_iov_get_pf_drvdata(pdev, &virtio_pci_driver);
	if (IS_ERR(pf_vp_dev))
		return NULL;

	return &pf_vp_dev->vdev;
}

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");