// SPDX-License-Identifier: GPL-2.0
/*
 * Mediated virtual PCI display host device driver
 *
 * See mdpy-defs.h for device specs
 *
 *   (c) Gerd Hoffmann <kraxel@redhat.com>
 *
 * based on mtty driver which is:
 *   Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *	 Author: Neo Jia <cjia@nvidia.com>
 *		 Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/cdev.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <drm/drm_fourcc.h>
#include "mdpy-defs.h"

#define MDPY_NAME		"mdpy"
#define MDPY_CLASS_NAME		"mdpy"

#define MDPY_CONFIG_SPACE_SIZE	0xff
#define MDPY_MEMORY_BAR_OFFSET	PAGE_SIZE
#define MDPY_DISPLAY_REGION	16

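/*
 * Region layout as seen through the vfio read/write offsets dispatched by
 * mdev_access() below: offsets below MDPY_CONFIG_SPACE_SIZE address the
 * virtual PCI config space, and the framebuffer (BAR 0, also exported as
 * the MDPY_DISPLAY_REGION region index) starts at MDPY_MEMORY_BAR_OFFSET.
 */
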
#define STORE_LE16(addr, val)	(*(u16 *)addr = val)
#define STORE_LE32(addr, val)	(*(u32 *)addr = val)

MODULE_LICENSE("GPL v2");

#define MDPY_TYPE_1 "vga"
#define MDPY_TYPE_2 "xga"
#define MDPY_TYPE_3 "hd"

static struct mdpy_type {
	struct mdev_type type;
	u32 format;
	u32 bytepp;
	u32 width;
	u32 height;
} mdpy_types[] = {
	{
		.type.sysfs_name	= MDPY_TYPE_1,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_1,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 640,
		.height = 480,
	}, {
		.type.sysfs_name	= MDPY_TYPE_2,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_2,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1024,
		.height = 768,
	}, {
		.type.sysfs_name	= MDPY_TYPE_3,
		.type.pretty_name	= MDPY_CLASS_NAME "-" MDPY_TYPE_3,
		.format = DRM_FORMAT_XRGB8888,
		.bytepp = 4,
		.width	= 1920,
		.height = 1080,
	},
};

static struct mdev_type *mdpy_mdev_types[] = {
	&mdpy_types[0].type,
	&mdpy_types[1].type,
	&mdpy_types[2].type,
};

static dev_t		mdpy_devt;
static struct class	*mdpy_class;
static struct cdev	mdpy_cdev;
static struct device	mdpy_dev;
static struct mdev_parent mdpy_parent;
static const struct vfio_device_ops mdpy_dev_ops;

/* State of each mdev device */
struct mdev_state {
	struct vfio_device vdev;
	u8 *vconfig;
	u32 bar_mask;
	struct mutex ops_lock;
	struct mdev_device *mdev;
	struct vfio_device_info dev_info;

	const struct mdpy_type *type;
	u32 memsize;
	void *memblk;
};

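/*
 * Build the virtual PCI config space: vendor/device IDs from mdpy-defs.h,
 * a 32-bit prefetchable memory BAR sized to the framebuffer, and a
 * vendor-specific capability that exports the drm format, width and
 * height of the display.
 */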
static void mdpy_create_config_space(struct mdev_state *mdev_state)
{
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_VENDOR_ID],
		   MDPY_PCI_VENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_DEVICE_ID],
		   MDPY_PCI_DEVICE_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_VENDOR_ID],
		   MDPY_PCI_SUBVENDOR_ID);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_SUBSYSTEM_ID],
		   MDPY_PCI_SUBDEVICE_ID);

	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_COMMAND],
		   PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_STATUS],
		   PCI_STATUS_CAP_LIST);
	STORE_LE16((u16 *) &mdev_state->vconfig[PCI_CLASS_DEVICE],
		   PCI_CLASS_DISPLAY_OTHER);
	mdev_state->vconfig[PCI_CLASS_REVISION] = 0x01;

	STORE_LE32((u32 *) &mdev_state->vconfig[PCI_BASE_ADDRESS_0],
		   PCI_BASE_ADDRESS_SPACE_MEMORY |
		   PCI_BASE_ADDRESS_MEM_TYPE_32	 |
		   PCI_BASE_ADDRESS_MEM_PREFETCH);
	mdev_state->bar_mask = ~(mdev_state->memsize) + 1;

	/* vendor specific capability for the config registers */
	mdev_state->vconfig[PCI_CAPABILITY_LIST]       = MDPY_VENDORCAP_OFFSET;
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 0] = 0x09; /* vendor cap */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 1] = 0x00; /* next ptr */
	mdev_state->vconfig[MDPY_VENDORCAP_OFFSET + 2] = MDPY_VENDORCAP_SIZE;
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_FORMAT_OFFSET],
		   mdev_state->type->format);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_WIDTH_OFFSET],
		   mdev_state->type->width);
	STORE_LE32((u32 *) &mdev_state->vconfig[MDPY_HEIGHT_OFFSET],
		   mdev_state->type->height);
}

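/*
 * Only BAR 0 writes need handling: emulate the usual PCI BAR sizing
 * handshake (guest writes 0xffffffff and reads back the size mask) and
 * remember the address the guest programs.
 */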
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
				 char *buf, u32 count)
{
	struct device *dev = mdev_dev(mdev_state->mdev);
	u32 cfg_addr;

	switch (offset) {
	case PCI_BASE_ADDRESS_0:
		cfg_addr = *(u32 *)buf;

		if (cfg_addr == 0xffffffff) {
			cfg_addr = (cfg_addr & mdev_state->bar_mask);
		} else {
			cfg_addr &= PCI_BASE_ADDRESS_MEM_MASK;
			if (cfg_addr)
				dev_info(dev, "BAR0 @ 0x%x\n", cfg_addr);
		}

		cfg_addr |= (mdev_state->vconfig[offset] &
			     ~PCI_BASE_ADDRESS_MEM_MASK);
		STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
		break;
	}
}

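/*
 * Dispatch a read or write according to the region layout described above:
 * config space accesses go to vconfig, framebuffer accesses go to memblk,
 * anything else is rejected.
 */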
static ssize_t mdev_access(struct mdev_state *mdev_state, char *buf,
			   size_t count, loff_t pos, bool is_write)
{
	int ret = 0;

	mutex_lock(&mdev_state->ops_lock);

	if (pos < MDPY_CONFIG_SPACE_SIZE) {
		if (is_write)
			handle_pci_cfg_write(mdev_state, pos, buf, count);
		else
			memcpy(buf, (mdev_state->vconfig + pos), count);

	} else if ((pos >= MDPY_MEMORY_BAR_OFFSET) &&
		   (pos + count <=
		    MDPY_MEMORY_BAR_OFFSET + mdev_state->memsize)) {
		pos -= MDPY_MEMORY_BAR_OFFSET;
		if (is_write)
			memcpy(mdev_state->memblk + pos, buf, count);
		else
			memcpy(buf, mdev_state->memblk + pos, count);

	} else {
		dev_info(mdev_state->vdev.dev,
			 "%s: %s @0x%llx (unhandled)\n", __func__,
			 is_write ? "WR" : "RD", pos);
		ret = -1;
		goto accessfailed;
	}

	ret = count;

accessfailed:
	mutex_unlock(&mdev_state->ops_lock);

	return ret;
}

static int mdpy_reset(struct mdev_state *mdev_state)
{
	u32 stride, i;

	/* initialize with gray gradient */
	stride = mdev_state->type->width * mdev_state->type->bytepp;
	for (i = 0; i < mdev_state->type->height; i++)
		memset(mdev_state->memblk + i * stride,
		       i * 255 / mdev_state->type->height,
		       stride);
	return 0;
}

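/*
 * The framebuffer is allocated with vmalloc_user() and rounded up to a
 * power of two so it can both be handed to userspace via mmap and be
 * advertised as a (power-of-two sized) PCI memory BAR.
 */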
static int mdpy_init_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	struct mdev_device *mdev = to_mdev_device(vdev->dev);
	const struct mdpy_type *type =
		container_of(mdev->type, struct mdpy_type, type);
	u32 fbsize;
	int ret = -ENOMEM;

	mdev_state->vconfig = kzalloc(MDPY_CONFIG_SPACE_SIZE, GFP_KERNEL);
	if (!mdev_state->vconfig)
		return ret;

	fbsize = roundup_pow_of_two(type->width * type->height * type->bytepp);

	mdev_state->memblk = vmalloc_user(fbsize);
	if (!mdev_state->memblk)
		goto out_vconfig;

	mutex_init(&mdev_state->ops_lock);
	mdev_state->mdev = mdev;
	mdev_state->type = type;
	mdev_state->memsize = fbsize;
	mdpy_create_config_space(mdev_state);
	mdpy_reset(mdev_state);

	dev_info(vdev->dev, "%s: %s (%dx%d)\n", __func__, type->type.pretty_name,
		 type->width, type->height);
	return 0;

out_vconfig:
	kfree(mdev_state->vconfig);
	return ret;
}

static int mdpy_probe(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state;
	int ret;

	mdev_state = vfio_alloc_device(mdev_state, vdev, &mdev->dev,
				       &mdpy_dev_ops);
	if (IS_ERR(mdev_state))
		return PTR_ERR(mdev_state);

	ret = vfio_register_emulated_iommu_dev(&mdev_state->vdev);
	if (ret)
		goto err_put_vdev;
	dev_set_drvdata(&mdev->dev, mdev_state);
	return 0;

err_put_vdev:
	vfio_put_device(&mdev_state->vdev);
	return ret;
}

static void mdpy_release_dev(struct vfio_device *vdev)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	vfree(mdev_state->memblk);
	kfree(mdev_state->vconfig);
}

static void mdpy_remove(struct mdev_device *mdev)
{
	struct mdev_state *mdev_state = dev_get_drvdata(&mdev->dev);

	dev_info(&mdev->dev, "%s\n", __func__);

	vfio_unregister_group_dev(&mdev_state->vdev);
	vfio_put_device(&mdev_state->vdev);
}

static ssize_t mdpy_read(struct vfio_device *vdev, char __user *buf,
			 size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 2;
		} else {
			u8 val;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, false);
			if (ret <= 0)
				goto read_err;

			if (copy_to_user(buf, &val, sizeof(val)))
				goto read_err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;

read_err:
	return -EFAULT;
}

static ssize_t mdpy_write(struct vfio_device *vdev, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);
	unsigned int done = 0;
	int ret;

	while (count) {
		size_t filled;

		if (count >= 4 && !(*ppos % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 4;
		} else if (count >= 2 && !(*ppos % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, sizeof(val)))
				goto write_err;

			ret = mdev_access(mdev_state, (char *)&val, sizeof(val),
					  *ppos, true);
			if (ret <= 0)
				goto write_err;

			filled = 1;
		}
		count -= filled;
		done += filled;
		*ppos += filled;
		buf += filled;
	}

	return done;
write_err:
	return -EFAULT;
}

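/*
 * mmap support: userspace (typically QEMU) maps the framebuffer directly.
 * The mapping must be MAP_SHARED, must not exceed the framebuffer size,
 * and must start at the page offset of the memory BAR.
 */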
static int mdpy_mmap(struct vfio_device *vdev, struct vm_area_struct *vma)
{
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	if (vma->vm_pgoff != MDPY_MEMORY_BAR_OFFSET >> PAGE_SHIFT)
		return -EINVAL;
	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (vma->vm_end - vma->vm_start > mdev_state->memsize)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	return remap_vmalloc_range(vma, mdev_state->memblk, 0);
}

static int mdpy_get_region_info(struct mdev_state *mdev_state,
				struct vfio_region_info *region_info,
				u16 *cap_type_id, void **cap_type)
{
	if (region_info->index >= VFIO_PCI_NUM_REGIONS &&
	    region_info->index != MDPY_DISPLAY_REGION)
		return -EINVAL;

	switch (region_info->index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		region_info->offset = 0;
		region_info->size   = MDPY_CONFIG_SPACE_SIZE;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ |
				       VFIO_REGION_INFO_FLAG_WRITE);
		break;
	case VFIO_PCI_BAR0_REGION_INDEX:
	case MDPY_DISPLAY_REGION:
		region_info->offset = MDPY_MEMORY_BAR_OFFSET;
		region_info->size   = mdev_state->memsize;
		region_info->flags  = (VFIO_REGION_INFO_FLAG_READ  |
				       VFIO_REGION_INFO_FLAG_WRITE |
				       VFIO_REGION_INFO_FLAG_MMAP);
		break;
	default:
		region_info->size   = 0;
		region_info->offset = 0;
		region_info->flags  = 0;
	}

	return 0;
}

static int mdpy_get_irq_info(struct vfio_irq_info *irq_info)
{
	irq_info->count = 0;
	return 0;
}

static int mdpy_get_device_info(struct vfio_device_info *dev_info)
{
	dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
	dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
	dev_info->num_irqs = VFIO_PCI_NUM_IRQS;
	return 0;
}

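/*
 * VFIO_DEVICE_QUERY_GFX_PLANE: mdpy only implements region-type planes
 * (the guest framebuffer is exposed as a device region), so dmabuf probes
 * are rejected and the plane always describes the whole framebuffer.
 */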
static int mdpy_query_gfx_plane(struct mdev_state *mdev_state,
				struct vfio_device_gfx_plane_info *plane)
{
	if (plane->flags & VFIO_GFX_PLANE_TYPE_PROBE) {
		if (plane->flags == (VFIO_GFX_PLANE_TYPE_PROBE |
				     VFIO_GFX_PLANE_TYPE_REGION))
			return 0;
		return -EINVAL;
	}

	if (plane->flags != VFIO_GFX_PLANE_TYPE_REGION)
		return -EINVAL;

	plane->drm_format     = mdev_state->type->format;
	plane->width	      = mdev_state->type->width;
	plane->height	      = mdev_state->type->height;
	plane->stride	      = (mdev_state->type->width *
				 mdev_state->type->bytepp);
	plane->size	      = mdev_state->memsize;
	plane->region_index   = MDPY_DISPLAY_REGION;

	/* unused */
	plane->drm_format_mod = 0;
	plane->x_pos	      = 0;
	plane->y_pos	      = 0;
	plane->x_hot	      = 0;
	plane->y_hot	      = 0;

	return 0;
}

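/*
 * Device ioctls: GET_INFO, GET_REGION_INFO, GET_IRQ_INFO and
 * QUERY_GFX_PLANE dispatch to the helpers above; SET_IRQS is rejected
 * (the device has no interrupts) and RESET repaints the gray gradient.
 */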
static long mdpy_ioctl(struct vfio_device *vdev, unsigned int cmd,
		       unsigned long arg)
{
	int ret = 0;
	unsigned long minsz;
	struct mdev_state *mdev_state =
		container_of(vdev, struct mdev_state, vdev);

	switch (cmd) {
	case VFIO_DEVICE_GET_INFO:
	{
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_device_info(&info);
		if (ret)
			return ret;

		memcpy(&mdev_state->dev_info, &info, sizeof(info));

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}
	case VFIO_DEVICE_GET_REGION_INFO:
	{
		struct vfio_region_info info;
		u16 cap_type_id = 0;
		void *cap_type = NULL;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		ret = mdpy_get_region_info(mdev_state, &info, &cap_type_id,
					   &cap_type);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_GET_IRQ_INFO:
	{
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if ((info.argsz < minsz) ||
		    (info.index >= mdev_state->dev_info.num_irqs))
			return -EINVAL;

		ret = mdpy_get_irq_info(&info);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &info, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_QUERY_GFX_PLANE:
	{
		struct vfio_device_gfx_plane_info plane;

		minsz = offsetofend(struct vfio_device_gfx_plane_info,
				    region_index);

		if (copy_from_user(&plane, (void __user *)arg, minsz))
			return -EFAULT;

		if (plane.argsz < minsz)
			return -EINVAL;

		ret = mdpy_query_gfx_plane(mdev_state, &plane);
		if (ret)
			return ret;

		if (copy_to_user((void __user *)arg, &plane, minsz))
			return -EFAULT;

		return 0;
	}

	case VFIO_DEVICE_SET_IRQS:
		return -EINVAL;

	case VFIO_DEVICE_RESET:
		return mdpy_reset(mdev_state);
	}
	return -ENOTTY;
}

static ssize_t
resolution_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct mdev_state *mdev_state = dev_get_drvdata(dev);

	return sprintf(buf, "%dx%d\n",
		       mdev_state->type->width,
		       mdev_state->type->height);
}
static DEVICE_ATTR_RO(resolution);

static struct attribute *mdev_dev_attrs[] = {
	&dev_attr_resolution.attr,
	NULL,
};

static const struct attribute_group mdev_dev_group = {
	.name  = "vendor",
	.attrs = mdev_dev_attrs,
};

static const struct attribute_group *mdev_dev_groups[] = {
	&mdev_dev_group,
	NULL,
};

static ssize_t mdpy_show_description(struct mdev_type *mtype, char *buf)
{
	struct mdpy_type *type = container_of(mtype, struct mdpy_type, type);

	return sprintf(buf, "virtual display, %dx%d framebuffer\n",
		       type->width, type->height);
}

static const struct vfio_device_ops mdpy_dev_ops = {
	.init = mdpy_init_dev,
	.release = mdpy_release_dev,
	.read = mdpy_read,
	.write = mdpy_write,
	.ioctl = mdpy_ioctl,
	.mmap = mdpy_mmap,
};

static struct mdev_driver mdpy_driver = {
	.device_api = VFIO_DEVICE_API_PCI_STRING,
	.max_instances = 4,
	.driver = {
		.name = "mdpy",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
		.dev_groups = mdev_dev_groups,
	},
	.probe = mdpy_probe,
	.remove	= mdpy_remove,
	.show_description = mdpy_show_description,
};

static const struct file_operations vd_fops = {
	.owner		= THIS_MODULE,
};

static void mdpy_device_release(struct device *dev)
{
	/* nothing */
}

static int __init mdpy_dev_init(void)
{
	int ret = 0;

	ret = alloc_chrdev_region(&mdpy_devt, 0, MINORMASK + 1, MDPY_NAME);
	if (ret < 0) {
		pr_err("Error: failed to register mdpy_dev, err: %d\n", ret);
		return ret;
	}
	cdev_init(&mdpy_cdev, &vd_fops);
	cdev_add(&mdpy_cdev, mdpy_devt, MINORMASK + 1);
	pr_info("%s: major %d\n", __func__, MAJOR(mdpy_devt));

	ret = mdev_register_driver(&mdpy_driver);
	if (ret)
		goto err_cdev;

	mdpy_class = class_create(THIS_MODULE, MDPY_CLASS_NAME);
	if (IS_ERR(mdpy_class)) {
		pr_err("Error: failed to register mdpy_dev class\n");
		ret = PTR_ERR(mdpy_class);
		goto err_driver;
	}
	mdpy_dev.class = mdpy_class;
	mdpy_dev.release = mdpy_device_release;
	dev_set_name(&mdpy_dev, "%s", MDPY_NAME);

	ret = device_register(&mdpy_dev);
	if (ret)
		goto err_put;

	ret = mdev_register_parent(&mdpy_parent, &mdpy_dev, &mdpy_driver,
				   mdpy_mdev_types,
				   ARRAY_SIZE(mdpy_mdev_types));
	if (ret)
		goto err_device;

	return 0;

err_device:
	device_del(&mdpy_dev);
err_put:
	put_device(&mdpy_dev);
	class_destroy(mdpy_class);
err_driver:
	mdev_unregister_driver(&mdpy_driver);
err_cdev:
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	return ret;
}

static void __exit mdpy_dev_exit(void)
{
	mdpy_dev.bus = NULL;
	mdev_unregister_parent(&mdpy_parent);

	device_unregister(&mdpy_dev);
	mdev_unregister_driver(&mdpy_driver);
	cdev_del(&mdpy_cdev);
	unregister_chrdev_region(mdpy_devt, MINORMASK + 1);
	class_destroy(mdpy_class);
	mdpy_class = NULL;
}

module_param_named(count, mdpy_driver.max_instances, int, 0444);
MODULE_PARM_DESC(count, "number of " MDPY_NAME " devices");

module_init(mdpy_dev_init)
module_exit(mdpy_dev_exit)
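
/*
 * Example usage (illustrative only; the exact sysfs paths and type
 * directory names depend on the kernel version and the mdev core, see
 * Documentation/driver-api/vfio-mediated-device.rst):
 *
 *   # modprobe mdpy
 *   # UUID=$(uuidgen)
 *   # echo $UUID > /sys/class/mdpy/mdpy/mdev_supported_types/mdpy-xga/create
 *   # qemu-system-x86_64 ... \
 *         -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/$UUID,display=on
 */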