// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Virtio memory mapped device driver
 *
 * Copyright 2011-2014, ARM Ltd.
 *
 * This module allows virtio devices to be used over a virtual, memory mapped
 * platform device.
 *
 * The guest device(s) may be instantiated in one of three equivalent ways:
 *
 * 1. Static platform device in board's code, e.g.:
 *
 *	static struct platform_device v2m_virtio_device = {
 *		.name = "virtio-mmio",
 *		.id = -1,
 *		.num_resources = 2,
 *		.resource = (struct resource []) {
 *			{
 *				.start = 0x1001e000,
 *				.end = 0x1001e0ff,
 *				.flags = IORESOURCE_MEM,
 *			}, {
 *				.start = 42 + 32,
 *				.end = 42 + 32,
 *				.flags = IORESOURCE_IRQ,
 *			},
 *		}
 *	};
 *
 * 2. Device Tree node, e.g.:
 *
 *	virtio_block@1e000 {
 *		compatible = "virtio,mmio";
 *		reg = <0x1e000 0x100>;
 *		interrupts = <42>;
 *	}
 *
 * 3. Kernel module (or command line) parameter. Can be used more than once -
 *    one device will be created for each one. Syntax:
 *
 *	[virtio_mmio.]device=<size>@<baseaddr>:<irq>[:<id>]
 *    where:
 *	<size>     := size (can use standard suffixes like K, M or G)
 *	<baseaddr> := physical base address
 *	<irq>      := interrupt number (as passed to request_irq())
 *	<id>       := (optional) platform device id
 *    e.g.:
 *	virtio_mmio.device=0x100@0x100b0000:48 \
 *		virtio_mmio.device=1K@0x1001e000:74
 *
 * Based on Virtio PCI driver by Anthony Liguori, copyright IBM Corp. 2007
 */

#define pr_fmt(fmt) "virtio-mmio: " fmt

#include <linux/acpi.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <uapi/linux/virtio_mmio.h>
#include <linux/virtio_ring.h>



/* The alignment to use between consumer and producer parts of vring.
 * Currently hardcoded to the page size. */
#define VIRTIO_MMIO_VRING_ALIGN		PAGE_SIZE



#define to_virtio_mmio_device(_plat_dev) \
	container_of(_plat_dev, struct virtio_mmio_device, vdev)

struct virtio_mmio_device {
	struct virtio_device vdev;
	struct platform_device *pdev;

	void __iomem *base;
	unsigned long version;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;
};

struct virtio_mmio_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the list node for the virtqueues list */
	struct list_head node;
};



/* Configuration interface */

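/*
 * The device advertises a 64-bit feature set via two banked 32-bit
 * registers: the driver writes the word index (1 for bits 63:32, 0 for
 * bits 31:0) to DEVICE_FEATURES_SEL and reads the selected word from
 * DEVICE_FEATURES.
 */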
static u64 vm_get_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 features;

	writel(1, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);
	features <<= 32;

	writel(0, vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES_SEL);
	features |= readl(vm_dev->base + VIRTIO_MMIO_DEVICE_FEATURES);

	return features;
}

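/*
 * The negotiated features are written back the same banked way through
 * DRIVER_FEATURES_SEL/DRIVER_FEATURES. A version 2 (modern) device must
 * have been granted VIRTIO_F_VERSION_1, so refuse the mix of a modern
 * transport with a legacy-only feature set.
 */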
static int vm_finalize_features(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* Give virtio_ring a chance to accept features. */
	vring_transport_features(vdev);

	/* Make sure there are no mixed devices */
	if (vm_dev->version == 2 &&
			!__virtio_test_bit(vdev, VIRTIO_F_VERSION_1)) {
		dev_err(&vdev->dev, "New virtio-mmio devices (version 2) must provide VIRTIO_F_VERSION_1 feature!\n");
		return -EINVAL;
	}

	writel(1, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)(vdev->features >> 32),
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	writel(0, vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES_SEL);
	writel((u32)vdev->features,
			vm_dev->base + VIRTIO_MMIO_DRIVER_FEATURES);

	return 0;
}

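/*
 * Config space accessors. Legacy (version 1) devices expose the config
 * space in guest-native byte order and are accessed bytewise; modern
 * (version 2) devices are little-endian and accessed with reads/writes
 * matching the field width, 64-bit fields being split into two 32-bit
 * halves.
 */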
static void vm_get(struct virtio_device *vdev, unsigned int offset,
		   void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			ptr[i] = readb(base + offset + i);
		return;
	}

	switch (len) {
	case 1:
		b = readb(base + offset);
		memcpy(buf, &b, sizeof b);
		break;
	case 2:
		w = cpu_to_le16(readw(base + offset));
		memcpy(buf, &w, sizeof w);
		break;
	case 4:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		break;
	case 8:
		l = cpu_to_le32(readl(base + offset));
		memcpy(buf, &l, sizeof l);
		l = cpu_to_le32(ioread32(base + offset + sizeof l));
		memcpy(buf + sizeof l, &l, sizeof l);
		break;
	default:
		BUG();
	}
}

static void vm_set(struct virtio_device *vdev, unsigned int offset,
		   const void *buf, unsigned int len)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	void __iomem *base = vm_dev->base + VIRTIO_MMIO_CONFIG;
	u8 b;
	__le16 w;
	__le32 l;

	if (vm_dev->version == 1) {
		const u8 *ptr = buf;
		int i;

		for (i = 0; i < len; i++)
			writeb(ptr[i], base + offset + i);

		return;
	}

	switch (len) {
	case 1:
		memcpy(&b, buf, sizeof b);
		writeb(b, base + offset);
		break;
	case 2:
		memcpy(&w, buf, sizeof w);
		writew(le16_to_cpu(w), base + offset);
		break;
	case 4:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		break;
	case 8:
		memcpy(&l, buf, sizeof l);
		writel(le32_to_cpu(l), base + offset);
		memcpy(&l, buf + sizeof l, sizeof l);
		writel(le32_to_cpu(l), base + offset + sizeof l);
		break;
	default:
		BUG();
	}
}

static u32 vm_generation(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	if (vm_dev->version == 1)
		return 0;
	else
		return readl(vm_dev->base + VIRTIO_MMIO_CONFIG_GENERATION);
}

static u8 vm_get_status(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return readl(vm_dev->base + VIRTIO_MMIO_STATUS) & 0xff;
}

static void vm_set_status(struct virtio_device *vdev, u8 status)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* We should never be setting status to 0. */
	BUG_ON(status == 0);

	/*
	 * Per memory-barriers.txt, wmb() is not needed to guarantee
	 * that the cache coherent memory writes have completed
	 * before writing to the MMIO region.
	 */
	writel(status, vm_dev->base + VIRTIO_MMIO_STATUS);
}

static void vm_reset(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	/* 0 status means a reset. */
	writel(0, vm_dev->base + VIRTIO_MMIO_STATUS);
}



/* Transport interface */

/* the notify function used when creating a virt queue */
static bool vm_notify(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);

	/* We write the queue's selector into the notification register to
	 * signal the other end */
	writel(vq->index, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);
	return true;
}

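/*
 * With VIRTIO_F_NOTIFICATION_DATA negotiated, the notification carries
 * extra queue state packed by vring_notification_data() instead of the
 * bare queue index.
 */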
static bool vm_notify_with_data(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	u32 data = vring_notification_data(vq);

	writel(data, vm_dev->base + VIRTIO_MMIO_QUEUE_NOTIFY);

	return true;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vm_interrupt(int irq, void *opaque)
{
	struct virtio_mmio_device *vm_dev = opaque;
	struct virtio_mmio_vq_info *info;
	unsigned long status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	/* Read and acknowledge interrupts */
	status = readl(vm_dev->base + VIRTIO_MMIO_INTERRUPT_STATUS);
	writel(status, vm_dev->base + VIRTIO_MMIO_INTERRUPT_ACK);

	if (unlikely(status & VIRTIO_MMIO_INT_CONFIG)) {
		virtio_config_changed(&vm_dev->vdev);
		ret = IRQ_HANDLED;
	}

	if (likely(status & VIRTIO_MMIO_INT_VRING)) {
		spin_lock_irqsave(&vm_dev->lock, flags);
		list_for_each_entry(info, &vm_dev->virtqueues, node)
			ret |= vring_interrupt(irq, info->vq);
		spin_unlock_irqrestore(&vm_dev->lock, flags);
	}

	return ret;
}



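/*
 * Tearing a queue down: unlink it from the IRQ dispatch list, select it
 * and mark it not ready (clear QUEUE_PFN on legacy devices), then free
 * the vring.
 */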
static void vm_del_vq(struct virtqueue *vq)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vq->vdev);
	struct virtio_mmio_vq_info *info = vq->priv;
	unsigned long flags;
	unsigned int index = vq->index;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_del(&info->node);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	/* Select and deactivate the queue */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}

	vring_del_virtqueue(vq);

	kfree(info);
}

static void vm_del_vqs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	struct virtqueue *vq, *n;

	list_for_each_entry_safe(vq, n, &vdev->vqs, list)
		vm_del_vq(vq);

	free_irq(platform_get_irq(vm_dev->pdev, 0), vm_dev);
}

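/*
 * All virtqueue callbacks are dispatched from the shared interrupt
 * handler, so waiting for that one IRQ to finish is enough to
 * synchronize against in-flight callbacks.
 */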
static void vm_synchronize_cbs(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	synchronize_irq(platform_get_irq(vm_dev->pdev, 0));
}

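/*
 * Queue setup: select the queue, make sure it is not already live, read
 * the maximum size the device supports (QUEUE_NUM_MAX), create the
 * vring, then program its size and addresses and flip QUEUE_READY (or,
 * for legacy devices, publish the descriptor table's 32-bit PFN).
 */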
static struct virtqueue *vm_setup_vq(struct virtio_device *vdev, unsigned int index,
				     void (*callback)(struct virtqueue *vq),
				     const char *name, bool ctx)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	bool (*notify)(struct virtqueue *vq);
	struct virtio_mmio_vq_info *info;
	struct virtqueue *vq;
	unsigned long flags;
	unsigned int num;
	int err;

	if (__virtio_test_bit(vdev, VIRTIO_F_NOTIFICATION_DATA))
		notify = vm_notify_with_data;
	else
		notify = vm_notify;

	if (!name)
		return NULL;

	/* Select the queue we're interested in */
	writel(index, vm_dev->base + VIRTIO_MMIO_QUEUE_SEL);

	/* Queue shouldn't already be set up. */
	if (readl(vm_dev->base + (vm_dev->version == 1 ?
			VIRTIO_MMIO_QUEUE_PFN : VIRTIO_MMIO_QUEUE_READY))) {
		err = -ENOENT;
		goto error_available;
	}

	/* Allocate and fill out our active queue description */
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		err = -ENOMEM;
		goto error_kmalloc;
	}

	num = readl(vm_dev->base + VIRTIO_MMIO_QUEUE_NUM_MAX);
	if (num == 0) {
		err = -ENOENT;
		goto error_new_virtqueue;
	}

	/* Create the vring */
	vq = vring_create_virtqueue(index, num, VIRTIO_MMIO_VRING_ALIGN, vdev,
				    true, true, ctx, notify, callback, name);
	if (!vq) {
		err = -ENOMEM;
		goto error_new_virtqueue;
	}

	vq->num_max = num;

	/* Activate the queue */
	writel(virtqueue_get_vring_size(vq), vm_dev->base + VIRTIO_MMIO_QUEUE_NUM);
	if (vm_dev->version == 1) {
		u64 q_pfn = virtqueue_get_desc_addr(vq) >> PAGE_SHIFT;

		/*
		 * virtio-mmio v1 uses a 32bit QUEUE PFN. If we have something
		 * that doesn't fit in 32bit, fail the setup rather than
		 * pretending to be successful.
		 */
		if (q_pfn >> 32) {
			dev_err(&vdev->dev,
				"platform bug: legacy virtio-mmio must not be used with RAM above 0x%llxGB\n",
				0x1ULL << (32 + PAGE_SHIFT - 30));
			err = -E2BIG;
			goto error_bad_pfn;
		}

		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_QUEUE_ALIGN);
		writel(q_pfn, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		u64 addr;

		addr = virtqueue_get_desc_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_DESC_HIGH);

		addr = virtqueue_get_avail_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_AVAIL_HIGH);

		addr = virtqueue_get_used_addr(vq);
		writel((u32)addr, vm_dev->base + VIRTIO_MMIO_QUEUE_USED_LOW);
		writel((u32)(addr >> 32),
				vm_dev->base + VIRTIO_MMIO_QUEUE_USED_HIGH);

		writel(1, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
	}

	vq->priv = info;
	info->vq = vq;

	spin_lock_irqsave(&vm_dev->lock, flags);
	list_add(&info->node, &vm_dev->virtqueues);
	spin_unlock_irqrestore(&vm_dev->lock, flags);

	return vq;

error_bad_pfn:
	vring_del_virtqueue(vq);
error_new_virtqueue:
	if (vm_dev->version == 1) {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_PFN);
	} else {
		writel(0, vm_dev->base + VIRTIO_MMIO_QUEUE_READY);
		WARN_ON(readl(vm_dev->base + VIRTIO_MMIO_QUEUE_READY));
	}
	kfree(info);
error_kmalloc:
error_available:
	return ERR_PTR(err);
}

static int vm_find_vqs(struct virtio_device *vdev, unsigned int nvqs,
		       struct virtqueue *vqs[],
		       vq_callback_t *callbacks[],
		       const char * const names[],
		       const bool *ctx,
		       struct irq_affinity *desc)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	int irq = platform_get_irq(vm_dev->pdev, 0);
	int i, err, queue_idx = 0;

	if (irq < 0)
		return irq;

	err = request_irq(irq, vm_interrupt, IRQF_SHARED,
			dev_name(&vdev->dev), vm_dev);
	if (err)
		return err;

	if (of_property_read_bool(vm_dev->pdev->dev.of_node, "wakeup-source"))
		enable_irq_wake(irq);

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			vqs[i] = NULL;
			continue;
		}

		vqs[i] = vm_setup_vq(vdev, queue_idx++, callbacks[i], names[i],
				     ctx ? ctx[i] : false);
		if (IS_ERR(vqs[i])) {
			vm_del_vqs(vdev);
			return PTR_ERR(vqs[i]);
		}
	}

	return 0;
}

static const char *vm_bus_name(struct virtio_device *vdev)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	return vm_dev->pdev->name;
}

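/*
 * Shared memory regions are discovered by writing the region id to
 * SHM_SEL and reading the 64-bit length and base address back as two
 * 32-bit halves; a length of all ones means the region does not exist.
 */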
static bool vm_get_shm_region(struct virtio_device *vdev,
			      struct virtio_shm_region *region, u8 id)
{
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);
	u64 len, addr;

	/* Select the region we're interested in */
	writel(id, vm_dev->base + VIRTIO_MMIO_SHM_SEL);

	/* Read the region size */
	len = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_LOW);
	len |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_LEN_HIGH) << 32;

	region->len = len;

	/*
	 * A region length of -1 means the shared memory region does not
	 * exist, so there is no need to proceed further.
	 */
	if (len == ~(u64)0)
		return false;

	/* Read the region base address */
	addr = (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_LOW);
	addr |= (u64) readl(vm_dev->base + VIRTIO_MMIO_SHM_BASE_HIGH) << 32;

	region->addr = addr;

	return true;
}

static const struct virtio_config_ops virtio_mmio_config_ops = {
	.get = vm_get,
	.set = vm_set,
	.generation = vm_generation,
	.get_status = vm_get_status,
	.set_status = vm_set_status,
	.reset = vm_reset,
	.find_vqs = vm_find_vqs,
	.del_vqs = vm_del_vqs,
	.get_features = vm_get_features,
	.finalize_features = vm_finalize_features,
	.bus_name = vm_bus_name,
	.get_shm_region = vm_get_shm_region,
	.synchronize_cbs = vm_synchronize_cbs,
};

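/*
 * System sleep: freeze/restore are delegated to the virtio core. On
 * restore, a legacy (version 1) device needs its guest page size
 * re-programmed before the core re-initializes it.
 */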
#ifdef CONFIG_PM_SLEEP
static int virtio_mmio_freeze(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	return virtio_device_freeze(&vm_dev->vdev);
}

static int virtio_mmio_restore(struct device *dev)
{
	struct virtio_mmio_device *vm_dev = dev_get_drvdata(dev);

	if (vm_dev->version == 1)
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

	return virtio_device_restore(&vm_dev->vdev);
}

static const struct dev_pm_ops virtio_mmio_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(virtio_mmio_freeze, virtio_mmio_restore)
};
#endif

static void virtio_mmio_release_dev(struct device *_d)
{
	struct virtio_device *vdev =
			container_of(_d, struct virtio_device, dev);
	struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev);

	kfree(vm_dev);
}

/* Platform device */

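/*
 * Probe: map the register window, then sanity-check the "virt" magic
 * value, the device version (only 1 and 2 are supported) and a non-zero
 * device ID before setting up the DMA masks and registering the device.
 */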
static int virtio_mmio_probe(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev;
	unsigned long magic;
	int rc;

	vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL);
	if (!vm_dev)
		return -ENOMEM;

	vm_dev->vdev.dev.parent = &pdev->dev;
	vm_dev->vdev.dev.release = virtio_mmio_release_dev;
	vm_dev->vdev.config = &virtio_mmio_config_ops;
	vm_dev->pdev = pdev;
	INIT_LIST_HEAD(&vm_dev->virtqueues);
	spin_lock_init(&vm_dev->lock);

	vm_dev->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(vm_dev->base)) {
		rc = PTR_ERR(vm_dev->base);
		goto free_vm_dev;
	}

	/* Check magic value */
	magic = readl(vm_dev->base + VIRTIO_MMIO_MAGIC_VALUE);
	if (magic != ('v' | 'i' << 8 | 'r' << 16 | 't' << 24)) {
		dev_warn(&pdev->dev, "Wrong magic value 0x%08lx!\n", magic);
		rc = -ENODEV;
		goto free_vm_dev;
	}

	/* Check device version */
	vm_dev->version = readl(vm_dev->base + VIRTIO_MMIO_VERSION);
	if (vm_dev->version < 1 || vm_dev->version > 2) {
		dev_err(&pdev->dev, "Version %ld not supported!\n",
				vm_dev->version);
		rc = -ENXIO;
		goto free_vm_dev;
	}

	vm_dev->vdev.id.device = readl(vm_dev->base + VIRTIO_MMIO_DEVICE_ID);
	if (vm_dev->vdev.id.device == 0) {
		/*
		 * A virtio-mmio device with ID 0 is a (dummy) placeholder
		 * with no function. End probing now with no error reported.
		 */
		rc = -ENODEV;
		goto free_vm_dev;
	}
	vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);

	if (vm_dev->version == 1) {
		writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);

		rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
		/*
		 * In the legacy case, ensure our coherently-allocated virtio
		 * ring will be at an address expressible as a 32-bit PFN.
		 */
		if (!rc)
			dma_set_coherent_mask(&pdev->dev,
					      DMA_BIT_MASK(32 + PAGE_SHIFT));
	} else {
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	}
	if (rc)
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");

	platform_set_drvdata(pdev, vm_dev);

	rc = register_virtio_device(&vm_dev->vdev);
	if (rc)
		put_device(&vm_dev->vdev.dev);

	return rc;

free_vm_dev:
	kfree(vm_dev);
	return rc;
}

static int virtio_mmio_remove(struct platform_device *pdev)
{
	struct virtio_mmio_device *vm_dev = platform_get_drvdata(pdev);

	unregister_virtio_device(&vm_dev->vdev);

	return 0;
}



/* Devices list parameter */

#if defined(CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES)

static struct device vm_cmdline_parent = {
	.init_name = "virtio-mmio-cmdline",
};

static int vm_cmdline_parent_registered;
static int vm_cmdline_id;

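/*
 * Parse one "device=<size>@<baseaddr>:<irq>[:<id>]" parameter (see the
 * header comment for the full syntax) and register a matching
 * "virtio-mmio" platform device. E.g. virtio_mmio.device=1K@0x1001e000:74
 * creates a device with a 1K register window at 0x1001e000 using IRQ 74.
 */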
static int vm_cmdline_set(const char *device,
		const struct kernel_param *kp)
{
	int err;
	struct resource resources[2] = {};
	char *str;
	long long base, size;
	unsigned int irq;
	int processed, consumed = 0;
	struct platform_device *pdev;

	/* Consume "size" part of the command line parameter */
	size = memparse(device, &str);

	/* Get "@<base>:<irq>[:<id>]" chunks */
	processed = sscanf(str, "@%lli:%u%n:%d%n",
			&base, &irq, &consumed,
			&vm_cmdline_id, &consumed);

	/*
	 * sscanf() must process at least 2 chunks; also there
	 * must be no extra characters after the last chunk, so
	 * str[consumed] must be '\0'
	 */
	if (processed < 2 || str[consumed] || irq == 0)
		return -EINVAL;

	resources[0].flags = IORESOURCE_MEM;
	resources[0].start = base;
	resources[0].end = base + size - 1;

	resources[1].flags = IORESOURCE_IRQ;
	resources[1].start = resources[1].end = irq;

	if (!vm_cmdline_parent_registered) {
		err = device_register(&vm_cmdline_parent);
		if (err) {
			put_device(&vm_cmdline_parent);
			pr_err("Failed to register parent device!\n");
			return err;
		}
		vm_cmdline_parent_registered = 1;
	}

	pr_info("Registering device virtio-mmio.%d at 0x%llx-0x%llx, IRQ %d.\n",
		vm_cmdline_id,
		(unsigned long long)resources[0].start,
		(unsigned long long)resources[0].end,
		(int)resources[1].start);

	pdev = platform_device_register_resndata(&vm_cmdline_parent,
			"virtio-mmio", vm_cmdline_id++,
			resources, ARRAY_SIZE(resources), NULL, 0);

	return PTR_ERR_OR_ZERO(pdev);
}

static int vm_cmdline_get_device(struct device *dev, void *data)
{
	char *buffer = data;
	unsigned int len = strlen(buffer);
	struct platform_device *pdev = to_platform_device(dev);

	snprintf(buffer + len, PAGE_SIZE - len, "0x%llx@0x%llx:%llu:%d\n",
			pdev->resource[0].end - pdev->resource[0].start + 1ULL,
			(unsigned long long)pdev->resource[0].start,
			(unsigned long long)pdev->resource[1].start,
			pdev->id);
	return 0;
}

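/* Report all registered devices in the same format vm_cmdline_set() accepts. */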
static int vm_cmdline_get(char *buffer, const struct kernel_param *kp)
{
	buffer[0] = '\0';
	device_for_each_child(&vm_cmdline_parent, buffer,
			vm_cmdline_get_device);
	return strlen(buffer) + 1;
}

static const struct kernel_param_ops vm_cmdline_param_ops = {
	.set = vm_cmdline_set,
	.get = vm_cmdline_get,
};

device_param_cb(device, &vm_cmdline_param_ops, NULL, S_IRUSR);

static int vm_unregister_cmdline_device(struct device *dev,
		void *data)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}

static void vm_unregister_cmdline_devices(void)
{
	if (vm_cmdline_parent_registered) {
		device_for_each_child(&vm_cmdline_parent, NULL,
				vm_unregister_cmdline_device);
		device_unregister(&vm_cmdline_parent);
		vm_cmdline_parent_registered = 0;
	}
}

#else

static void vm_unregister_cmdline_devices(void)
{
}

#endif

/* Platform driver */

static const struct of_device_id virtio_mmio_match[] = {
	{ .compatible = "virtio,mmio", },
	{},
};
MODULE_DEVICE_TABLE(of, virtio_mmio_match);

#ifdef CONFIG_ACPI
static const struct acpi_device_id virtio_mmio_acpi_match[] = {
	{ "LNRO0005", },
	{ }
};
MODULE_DEVICE_TABLE(acpi, virtio_mmio_acpi_match);
#endif

static struct platform_driver virtio_mmio_driver = {
	.probe = virtio_mmio_probe,
	.remove = virtio_mmio_remove,
	.driver = {
		.name = "virtio-mmio",
		.of_match_table = virtio_mmio_match,
		.acpi_match_table = ACPI_PTR(virtio_mmio_acpi_match),
#ifdef CONFIG_PM_SLEEP
		.pm = &virtio_mmio_pm_ops,
#endif
	},
};

static int __init virtio_mmio_init(void)
{
	return platform_driver_register(&virtio_mmio_driver);
}

static void __exit virtio_mmio_exit(void)
{
	platform_driver_unregister(&virtio_mmio_driver);
	vm_unregister_cmdline_devices();
}

module_init(virtio_mmio_init);
module_exit(virtio_mmio_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_DESCRIPTION("Platform bus driver for memory mapped virtio devices");
MODULE_LICENSE("GPL");