// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *       Author: Neo Jia <cjia@nvidia.com>
 *               Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)


MODULE_LICENSE("GPL v2");
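
/*
 * Usage sketch (not driver logic): instances of this sample device are
 * created through the standard mdev sysfs interface, roughly along the
 * lines of
 *
 *   echo "$(uuidgen)" > \
 *     /sys/devices/virtual/mdpy/mdpy/mdev_supported_types/mdpy-xga/create
 *
 * The exact parent path may differ on a given system; see
 * Documentation/driver-api/vfio-mediated-device.rst.  The resulting mdev
 * can then be handed to a VFIO-capable userspace such as QEMU.
 */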

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static struct mdpy_type {
	struct mdev_type type;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.type.sysfs_name = MDPY_TYPE_1,
		.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		.type.sysfs_name = MDPY_TYPE_2,
		.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		.type.sysfs_name = MDPY_TYPE_3,
		.type.pretty_name = MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

static struct mdev_type *mdpy_mdev_types[] = {
	&mdpy_types[0].type,
	&mdpy_types[1].type,
	&mdpy_types[2].type,
};

static dev_t mdpy_devt;
static struct class *mdpy_class;
static struct cdev mdpy_cdev;
static struct device mdpy_dev;
static struct mdev_parent mdpy_parent;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

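/*
 * Build the virtual PCI config space for one instance: vendor/device IDs
 * from mdpy-defs.h, a single 32-bit prefetchable memory BAR, and a vendor
 * capability that exposes the framebuffer format, width and height to the
 * guest driver.
 */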
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST] = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

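/*
 * Config space writes: only BAR0 is emulated.  Writing all-ones is the
 * standard PCI BAR sizing probe and is answered with the BAR mask; any
 * other value is stored as the new BAR address, keeping the read-only
 * low bits intact.
 */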
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

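/*
 * Common backend for the region read/write paths: offsets below
 * MDPY_CONFIG_SPACE_SIZE go to the emulated config space, offsets inside
 * the BAR window go to the framebuffer memory, everything else fails.
 */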
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		/* access the framebuffer at the given offset into the BAR */
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;


accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

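/*
 * Per-instance setup: allocate the virtual config space and a
 * power-of-two sized framebuffer (vmalloc_user so it can be mmap'ed),
 * then populate config space and draw the initial test pattern.
 */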
static int mdpy_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	const struct mdpy_type *type =
		container_of(mdev->type, struct mdpy_type, type);
	u32 fbsize;
	int ret = -ENOMEM;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig)
		return ret;

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk)
		goto out_vconfig;

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
		 type->width, type->height);
	return 0;

out_vconfig:
	kfree(mdev_state->vconfig);
	return ret;
}

static int mdpy_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mdpy_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	vfio_put_device(&mdev_state->vdev);
	return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}

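/*
 * read()/write() on the vfio device: split the transfer into the widest
 * naturally aligned chunks (4/2/1 bytes) and feed them through
 * mdev_access() one at a time.
 */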
static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

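/*
 * mmap() maps the framebuffer BAR only: the offset must match the BAR
 * region, the size must fit the framebuffer, and the mapping must be
 * shared since the memory is handed out with remap_vmalloc_range().
 */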
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

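/*
 * Region layout reported to userspace: config space at offset 0, the
 * framebuffer at MDPY_MEMORY_BAR_OFFSET.  MDPY_DISPLAY_REGION is an extra
 * index that aliases BAR0 and is the one reported by
 * VFIO_DEVICE_QUERY_GFX_PLANE.
 */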
static int mdpy_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
				      VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size = mdev_state->memsize;
		region_info->flags = (VFIO_REGION_INFO_FLAG_READ |
				      VFIO_REGION_INFO_FLAG_WRITE |
				      VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size = 0;
		region_info->offset = 0;
		region_info->flags = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

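/*
 * VFIO_DEVICE_QUERY_GFX_PLANE: this device only supports region-type
 * planes (no dma-buf), so probe requests succeed for the REGION type and
 * everything else is rejected.  The plane simply describes the whole
 * framebuffer region.
 */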
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format = mdev_state->type->format;
	plane->width = mdev_state->type->width;
	plane->height = mdev_state->type->height;
	plane->stride = (mdev_state->type->width *
			 mdev_state->type->bytepp);
	plane->size = mdev_state->memsize;
	plane->region_index = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos = 0;
	plane->y_pos = 0;
	plane->x_hot = 0;
	plane->y_hot = 0;

	return 0;
}

static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane = {};

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
{
	struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}

static const struct vfio_device_ops mdpy_dev_ops = {
	.init = mdpy_init_dev,
	.release = mdpy_release_dev,
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.mmap = mdpy_mmap,
	.bind_iommufd = vfio_iommufd_emulated_bind,
	.unbind_iommufd = vfio_iommufd_emulated_unbind,
	.attach_ioas = vfio_iommufd_emulated_attach_ioas,
	.detach_ioas = vfio_iommufd_emulated_detach_ioas,
};

static struct mdev_driver mdpy_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.max_instances = 4,
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove	= mdpy_remove,
	.show_description = mdpy_show_description,
};

static const struct file_operations vd_fops = {
	.owner = THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

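/*
 * Module init: register a chardev region (whose file_operations are empty
 * in this sample), register the mdev driver, create the "mdpy" class and
 * parent device, and finally register that device as an mdev parent
 * offering the three display types.
 */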
static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	mdpy_class = class_create(MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto err_driver;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_put;

	ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
				   mdpy_mdev_types,
				   ARRAY_SIZE(mdpy_mdev_types));
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_del(&mdpy_dev);
err_put:
	put_device(&mdpy_dev);
	class_destroy(mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_parent(&mdpy_parent);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
	mdpy_class = NULL;
}

module_param_named(count, mdpy_driver.max_instances, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)