// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU-based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 */

#include "virtio_pci_common.h"

static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
		 "Force legacy mode for transitional virtio 1 devices");
#endif
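
/*
 * Example (hypothetical invocation): with the driver built as a module,
 *
 *	modprobe virtio_pci force_legacy=1
 *
 * or, when built in, "virtio_pci.force_legacy=1" on the kernel command line.
 */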

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i;

	if (vp_dev->intx_enabled)
		synchronize_irq(vp_dev->pci_dev->irq);

	for (i = 0; i < vp_dev->msix_vectors; ++i)
		synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq)
{
	/* we write the queue's selector into the notification register to
	 * signal the other end */
	iowrite16(vq->index, (void __iomem *)vq->priv);
	return true;
}
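
/*
 * Note: vq->priv is expected to hold the ioremapped address of this queue's
 * notification register; the legacy and modern transports each point it
 * there when the queue is set up, which is what makes the cast in
 * vp_notify() safe.
 */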

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;

	virtio_config_changed(&vp_dev->vdev);
	return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	struct virtio_pci_vq_info *info;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_for_each_entry(info, &vp_dev->virtqueues, node) {
		if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
	struct virtio_pci_device *vp_dev = opaque;
	u8 isr;

	/* reading the ISR has the effect of also clearing it so it's very
	 * important to save off the value. */
	isr = ioread8(vp_dev->isr);

	/* It's definitely not us if the ISR was not high */
	if (!isr)
		return IRQ_NONE;

	/* Configuration change? Tell driver if it wants to know. */
	if (isr & VIRTIO_PCI_ISR_CONFIG)
		vp_config_changed(irq, opaque);

	return vp_vring_interrupt(irq, opaque);
}

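/*
 * Vector layout assumed below: vector 0 carries configuration-change
 * interrupts; with per_vq_vectors each virtqueue that has a callback gets
 * its own vector after that, otherwise all virtqueues share vector 1
 * (VP_MSIX_VQ_VECTOR).
 */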
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
				   bool per_vq_vectors, struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	const char *name = dev_name(&vp_dev->vdev.dev);
	unsigned flags = PCI_IRQ_MSIX;
	unsigned i, v;
	int err = -ENOMEM;

	vp_dev->msix_vectors = nvectors;

	vp_dev->msix_names = kmalloc_array(nvectors,
					   sizeof(*vp_dev->msix_names),
					   GFP_KERNEL);
	if (!vp_dev->msix_names)
		goto error;
	vp_dev->msix_affinity_masks
		= kcalloc(nvectors, sizeof(*vp_dev->msix_affinity_masks),
			  GFP_KERNEL);
	if (!vp_dev->msix_affinity_masks)
		goto error;
	for (i = 0; i < nvectors; ++i)
		if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
				       GFP_KERNEL))
			goto error;

	if (desc) {
		flags |= PCI_IRQ_AFFINITY;
		desc->pre_vectors++; /* virtio config vector */
	}

	err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
					     nvectors, flags, desc);
	if (err < 0)
		goto error;
	vp_dev->msix_enabled = 1;

	/* Set the vector used for configuration */
	v = vp_dev->msix_used_vectors;
	snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
		 "%s-config", name);
	err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
			  vp_config_changed, 0, vp_dev->msix_names[v],
			  vp_dev);
	if (err)
		goto error;
	++vp_dev->msix_used_vectors;

	v = vp_dev->config_vector(vp_dev, v);
	/* Verify we had enough resources to assign the vector */
	if (v == VIRTIO_MSI_NO_VECTOR) {
		err = -EBUSY;
		goto error;
	}

	if (!per_vq_vectors) {
		/* Shared vector for all VQs */
		v = vp_dev->msix_used_vectors;
		snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
			 "%s-virtqueues", name);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
				  vp_vring_interrupt, 0, vp_dev->msix_names[v],
				  vp_dev);
		if (err)
			goto error;
		++vp_dev->msix_used_vectors;
	}
	return 0;
error:
	return err;
}

static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name,
				     bool ctx,
				     u16 msix_vec)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
	struct virtqueue *vq;
	unsigned long flags;

	/* fill out our structure that represents an active queue */
	if (!info)
		return ERR_PTR(-ENOMEM);

	vq = vp_dev->setup_vq(vp_dev, info, index, callback, name, ctx,
			      msix_vec);
	if (IS_ERR(vq))
		goto out_info;

	info->vq = vq;
	if (callback) {
		spin_lock_irqsave(&vp_dev->lock, flags);
		list_add(&info->node, &vp_dev->virtqueues);
		spin_unlock_irqrestore(&vp_dev->lock, flags);
	} else {
		INIT_LIST_HEAD(&info->node);
	}

	vp_dev->vqs[index] = info;
	return vq;

out_info:
	kfree(info);
	return vq;
}
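
/*
 * Only queues with a callback are put on the virtqueues list above, so
 * vp_vring_interrupt() never scans queues that cannot want an interrupt.
 */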

static void vp_del_vq(struct virtqueue *vq)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	unsigned long flags;

	spin_lock_irqsave(&vp_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vp_dev->lock, flags);

	vp_dev->del_vq(info);
	kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtqueue *vq, *n;
	int i;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
		if (vp_dev->per_vq_vectors) {
			int v = vp_dev->vqs[vq->index]->msix_vector;

			if (v != VIRTIO_MSI_NO_VECTOR) {
				int irq = pci_irq_vector(vp_dev->pci_dev, v);

				irq_set_affinity_hint(irq, NULL);
				free_irq(irq, vq);
			}
		}
		vp_del_vq(vq);
	}
	vp_dev->per_vq_vectors = false;

	if (vp_dev->intx_enabled) {
		free_irq(vp_dev->pci_dev->irq, vp_dev);
		vp_dev->intx_enabled = 0;
	}

	for (i = 0; i < vp_dev->msix_used_vectors; ++i)
		free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

	if (vp_dev->msix_affinity_masks) {
		for (i = 0; i < vp_dev->msix_vectors; i++)
			if (vp_dev->msix_affinity_masks[i])
				free_cpumask_var(vp_dev->msix_affinity_masks[i]);
	}

	if (vp_dev->msix_enabled) {
		/* Disable the vector used for configuration */
		vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

		pci_free_irq_vectors(vp_dev->pci_dev);
		vp_dev->msix_enabled = 0;
	}

	vp_dev->msix_vectors = 0;
	vp_dev->msix_used_vectors = 0;
	kfree(vp_dev->msix_names);
	vp_dev->msix_names = NULL;
	kfree(vp_dev->msix_affinity_masks);
	vp_dev->msix_affinity_masks = NULL;
	kfree(vp_dev->vqs);
	vp_dev->vqs = NULL;
}

static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], bool per_vq_vectors,
		const bool *ctx,
		struct irq_affinity *desc)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	u16 msix_vec;
	int i, err, nvectors, allocated_vectors, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	if (per_vq_vectors) {
		/* Best option: one for change interrupt, one per vq. */
		nvectors = 1;
		for (i = 0; i < nvqs; ++i)
			if (names[i] && callbacks[i])
				++nvectors;
	} else {
		/* Second best: one for change, shared for all vqs. */
		nvectors = 2;
	}
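	/* e.g. a hypothetical device with three virtqueues, two of which
	 * have callbacks, needs 1 + 2 = 3 vectors in per_vq_vectors mode,
	 * and always exactly 2 in shared mode. */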

	err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
				      per_vq_vectors ? desc : NULL);
	if (err)
		goto error_find;

	vp_dev->per_vq_vectors = per_vq_vectors;
	allocated_vectors = vp_dev->msix_used_vectors;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		if (!callbacks[i])
			msix_vec = VIRTIO_MSI_NO_VECTOR;
		else if (vp_dev->per_vq_vectors)
			msix_vec = allocated_vectors++;
		else
			msix_vec = VP_MSIX_VQ_VECTOR;
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     msix_vec);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto error_find;
		}

		if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
			continue;

		/* allocate per-vq irq if available and necessary */
		snprintf(vp_dev->msix_names[msix_vec],
			 sizeof *vp_dev->msix_names,
			 "%s-%s",
			 dev_name(&vp_dev->vdev.dev), names[i]);
		err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
				  vring_interrupt, 0,
				  vp_dev->msix_names[msix_vec],
				  vqs[i]);
		if (err)
			goto error_find;
	}
	return 0;

error_find:
	vp_del_vqs(vdev);
	return err;
}

static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	int i, err, queue_idx = 0;

	vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
	if (!vp_dev->vqs)
		return -ENOMEM;

	err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vp_dev);
	if (err)
		goto out_del_vqs;

	vp_dev->intx_enabled = 1;
	vp_dev->per_vq_vectors = false;
	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}
		vqs[i] = vp_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false,
				     VIRTIO_MSI_NO_VECTOR);
		if (IS_ERR(vqs[i])) {
			err = PTR_ERR(vqs[i]);
			goto out_del_vqs;
		}
	}

	return 0;
out_del_vqs:
	vp_del_vqs(vdev);
	return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[], vq_callback_t *callbacks[],
		const char * const names[], const bool *ctx,
		struct irq_affinity *desc)
{
	int err;

	/* Try MSI-X with one vector per queue. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, ctx, desc);
	if (!err)
		return 0;
	/* Fallback: MSI-X with one vector for config, one shared for queues. */
	err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, ctx, desc);
	if (!err)
		return 0;
	/* Finally fall back to regular interrupts. */
	return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names, ctx);
}
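
/*
 * A minimal sketch of how a driver reaches this path, assuming the usual
 * virtio_find_vqs() wrapper from <linux/virtio_config.h> (rx_done and
 * tx_done are hypothetical driver callbacks):
 *
 *	vq_callback_t *callbacks[] = { rx_done, tx_done };
 *	static const char * const names[] = { "rx", "tx" };
 *	struct virtqueue *vqs[2];
 *	int err;
 *
 *	err = virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
 */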

const char *vp_bus_name(struct virtio_device *vdev)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
	struct virtio_device *vdev = vq->vdev;
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);
	struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
	struct cpumask *mask;
	unsigned int irq;

	if (!vq->callback)
		return -EINVAL;

	if (vp_dev->msix_enabled) {
		mask = vp_dev->msix_affinity_masks[info->msix_vector];
		irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
		if (!cpu_mask)
			irq_set_affinity_hint(irq, NULL);
		else {
			cpumask_copy(mask, cpu_mask);
			irq_set_affinity_hint(irq, mask);
		}
	}
	return 0;
}

const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	if (!vp_dev->per_vq_vectors ||
	    vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
		return NULL;

	return pci_irq_get_affinity(vp_dev->pci_dev,
				    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = virtio_device_freeze(&vp_dev->vdev);

	if (!ret)
		pci_disable_device(pci_dev);
	return ret;
}

static int virtio_pci_restore(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	int ret;

	ret = pci_enable_device(pci_dev);
	if (ret)
		return ret;

	pci_set_master(pci_dev);
	return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
	struct virtio_device *vdev = dev_to_virtio(_d);
	struct virtio_pci_device *vp_dev = to_vp_device(vdev);

	/* As struct device is a kobject, it's not safe to
	 * free the memory (including the reference counter itself)
	 * until its release callback. */
	kfree(vp_dev);
}

static int virtio_pci_probe(struct pci_dev *pci_dev,
			    const struct pci_device_id *id)
{
	struct virtio_pci_device *vp_dev, *reg_dev = NULL;
	int rc;

	/* allocate our structure and fill it out */
	vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
	if (!vp_dev)
		return -ENOMEM;

	pci_set_drvdata(pci_dev, vp_dev);
	vp_dev->vdev.dev.parent = &pci_dev->dev;
	vp_dev->vdev.dev.release = virtio_pci_release_dev;
	vp_dev->pci_dev = pci_dev;
	INIT_LIST_HEAD(&vp_dev->virtqueues);
	spin_lock_init(&vp_dev->lock);

	/* enable the device */
	rc = pci_enable_device(pci_dev);
	if (rc)
		goto err_enable_device;

	if (force_legacy) {
		rc = virtio_pci_legacy_probe(vp_dev);
		/* Also try modern mode if we can't map BAR0 (no IO space). */
		if (rc == -ENODEV || rc == -ENOMEM)
			rc = virtio_pci_modern_probe(vp_dev);
		if (rc)
			goto err_probe;
	} else {
		rc = virtio_pci_modern_probe(vp_dev);
		if (rc == -ENODEV)
			rc = virtio_pci_legacy_probe(vp_dev);
		if (rc)
			goto err_probe;
	}

	pci_set_master(pci_dev);

	rc = register_virtio_device(&vp_dev->vdev);
	reg_dev = vp_dev;
	if (rc)
		goto err_register;

	return 0;

err_register:
	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);
err_probe:
	pci_disable_device(pci_dev);
err_enable_device:
	if (reg_dev)
		put_device(&vp_dev->vdev.dev);
	else
		kfree(vp_dev);
	return rc;
}

static void virtio_pci_remove(struct pci_dev *pci_dev)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct device *dev = get_device(&vp_dev->vdev.dev);

	pci_disable_sriov(pci_dev);

	unregister_virtio_device(&vp_dev->vdev);

	if (vp_dev->ioaddr)
		virtio_pci_legacy_remove(vp_dev);
	else
		virtio_pci_modern_remove(vp_dev);

	pci_disable_device(pci_dev);
	put_device(dev);
}

static int virtio_pci_sriov_configure(struct pci_dev *pci_dev, int num_vfs)
{
	struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
	struct virtio_device *vdev = &vp_dev->vdev;
	int ret;

	if (!(vdev->config->get_status(vdev) & VIRTIO_CONFIG_S_DRIVER_OK))
		return -EBUSY;

	if (!__virtio_test_bit(vdev, VIRTIO_F_SR_IOV))
		return -EINVAL;

	if (pci_vfs_assigned(pci_dev))
		return -EPERM;

	if (num_vfs == 0) {
		pci_disable_sriov(pci_dev);
		return 0;
	}

	ret = pci_enable_sriov(pci_dev, num_vfs);
	if (ret < 0)
		return ret;

	return num_vfs;
}
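
/*
 * The PCI core calls virtio_pci_sriov_configure() when userspace writes the
 * desired VF count to sysfs, e.g. (hypothetical device address):
 *
 *	echo 4 > /sys/bus/pci/devices/0000:00:05.0/sriov_numvfs
 */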

static struct pci_driver virtio_pci_driver = {
	.name		= "virtio-pci",
	.id_table	= virtio_pci_id_table,
	.probe		= virtio_pci_probe,
	.remove		= virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
	.driver.pm	= &virtio_pci_pm_ops,
#endif
	.sriov_configure = virtio_pci_sriov_configure,
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");