/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "client.h"

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl *cl;

	int err;

	dev = container_of(inode->i_cdev, struct mei_device, cdev);
	if (!dev)
		return -ENODEV;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(dev->dev, "dev_state != MEI_ENABLED  dev_state = %s\n",
		    mei_dev_state_str(dev->dev_state));
		err = -ENODEV;
		goto err_unlock;
	}

	cl = mei_cl_alloc_linked(dev);
	if (IS_ERR(cl)) {
		err = PTR_ERR(cl);
		goto err_unlock;
	}

	cl->fp = file;
	file->private_data = cl;

	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

err_unlock:
	mutex_unlock(&dev->device_lock);
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * Return: 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	rets = mei_cl_disconnect(cl);

	mei_cl_flush_queues(cl, file);
	cl_dbg(dev, cl, "removing\n");

	mei_cl_unlink(cl);

	file->private_data = NULL;

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	struct mei_cl_cb *cb = NULL;
	bool nonblock = !!(file->f_flags & O_NONBLOCK);
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	if (ubuf == NULL) {
		rets = -EMSGSIZE;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (cb)
		goto copy_buffer;

	if (*offset > 0)
		*offset = 0;

	rets = mei_cl_read_start(cl, length, file);
	if (rets && rets != -EBUSY) {
		cl_dbg(dev, cl, "mei start read failure status = %d\n", rets);
		goto out;
	}

	if (nonblock) {
		rets = -EAGAIN;
		goto out;
	}


again:
	mutex_unlock(&dev->device_lock);
	if (wait_event_interruptible(cl->rx_wait,
				     !list_empty(&cl->rd_completed) ||
				     !mei_cl_is_connected(cl))) {
		if (signal_pending(current))
			return -EINTR;
		return -ERESTARTSYS;
	}
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		rets = -ENODEV;
		goto out;
	}

	cb = mei_cl_read_cb(cl, file);
	if (!cb) {
		/*
		 * For amthif all the waiters are woken up,
		 * but only fp with matching cb->fp get the cb,
		 * the others have to return to wait on read.
		 */
		if (cl == &dev->iamthif_cl)
			goto again;

		rets = 0;
		goto out;
	}

copy_buffer:
	/* now copy the data to user space */
	if (cb->status) {
		rets = cb->status;
		cl_dbg(dev, cl, "read operation failed %d\n", rets);
		goto free;
	}

	cl_dbg(dev, cl, "buf.size = %zu buf.idx = %zu offset = %lld\n",
	       cb->buf.size, cb->buf_idx, *offset);
	if (*offset >= cb->buf_idx) {
		rets = 0;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->buf.data + *offset, length)) {
		dev_dbg(dev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	/* not all data was read, keep the cb */
	if (*offset < cb->buf_idx)
		goto out;

free:
	mei_io_cb_free(cb);
	*offset = 0;

out:
	cl_dbg(dev, cl, "end mei read rets = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * Return: >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if (!mei_cl_is_connected(cl)) {
		cl_err(dev, cl, "is not connected");
		rets = -ENODEV;
		goto out;
	}

	if (!mei_me_cl_is_active(cl->me_cl)) {
		rets = -ENOTTY;
		goto out;
	}

	if (length > mei_cl_mtu(cl)) {
		rets = -EFBIG;
		goto out;
	}

	if (length == 0) {
		rets = 0;
		goto out;
	}

	*offset = 0;
	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	rets = copy_from_user(cb->buf.data, ubuf, length);
	if (rets) {
		dev_dbg(dev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		mei_io_cb_free(cb);
		goto out;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(cl, cb);
		if (!rets)
			rets = length;
		goto out;
	}

	rets = mei_cl_write(cl, cb);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_ioctl_connect_client - the connect to fw client IOCTL function
 *
 * @file: pointer to file structure
 * @data: IOCTL connect data, input and output parameters
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_ioctl_connect_client(struct file *file,
			struct mei_connect_client_data *data)
{
	struct mei_device *dev;
	struct mei_client *client;
	struct mei_me_client *me_cl;
	struct mei_cl *cl;
	int rets;

	cl = file->private_data;
	dev = cl->dev;

	if (dev->dev_state != MEI_DEV_ENABLED)
		return -ENODEV;

	if (cl->state != MEI_FILE_INITIALIZING &&
	    cl->state != MEI_FILE_DISCONNECTED)
		return -EBUSY;

	/* find ME client we're trying to connect to */
	me_cl = mei_me_cl_by_uuid(dev, &data->in_client_uuid);
	if (!me_cl) {
		dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
			&data->in_client_uuid);
		rets = -ENOTTY;
		goto end;
	}

	if (me_cl->props.fixed_address) {
		bool forbidden = dev->override_fixed_address ?
			 !dev->allow_fixed_address : !dev->hbm_f_fa_supported;
		if (forbidden) {
			dev_dbg(dev->dev, "Connection forbidden to FW Client UUID = %pUl\n",
				&data->in_client_uuid);
			rets = -ENOTTY;
			goto end;
		}
	}

	dev_dbg(dev->dev, "Connect to FW Client ID = %d\n",
			me_cl->client_id);
	dev_dbg(dev->dev, "FW Client - Protocol Version = %d\n",
			me_cl->props.protocol_version);
	dev_dbg(dev->dev, "FW Client - Max Msg Len = %d\n",
			me_cl->props.max_msg_length);

	/* if we're connecting to amthif client then we will use the
	 * existing connection
	 */
	if (uuid_le_cmp(data->in_client_uuid, mei_amthif_guid) == 0) {
		dev_dbg(dev->dev, "FW Client is amthi\n");
		if (!mei_cl_is_connected(&dev->iamthif_cl)) {
			rets = -ENODEV;
			goto end;
		}
		mei_cl_unlink(cl);

		kfree(cl);
		cl = NULL;
		dev->iamthif_open_count++;
		file->private_data = &dev->iamthif_cl;

		client = &data->out_client_properties;
		client->max_msg_length = me_cl->props.max_msg_length;
		client->protocol_version = me_cl->props.protocol_version;
		rets = dev->iamthif_cl.status;

		goto end;
	}

	/* prepare the output buffer */
	client = &data->out_client_properties;
	client->max_msg_length = me_cl->props.max_msg_length;
	client->protocol_version = me_cl->props.protocol_version;
	dev_dbg(dev->dev, "Can connect?\n");

	rets = mei_cl_connect(cl, me_cl, file);

end:
	mei_me_cl_put(me_cl);
	return rets;
}

/**
 * mei_ioctl_client_notify_request -
 *     propagate event notification request to client
 *
 * @file: pointer to file structure
 * @request: 0 - disable, 1 - enable
 *
 * Return: 0 on success, <0 on error
 */
static int mei_ioctl_client_notify_request(const struct file *file, u32 request)
{
	struct mei_cl *cl = file->private_data;

	if (request != MEI_HBM_NOTIFICATION_START &&
	    request != MEI_HBM_NOTIFICATION_STOP)
		return -EINVAL;

	return mei_cl_notify_request(cl, file, (u8)request);
}

/**
 * mei_ioctl_client_notify_get - wait for notification request
 *
 * @file: pointer to file structure
 * @notify_get: on return, 1 if an event notification is pending, 0 otherwise
 *
 * Return: 0 on success, <0 on error
 */
static int mei_ioctl_client_notify_get(const struct file *file, u32 *notify_get)
{
	struct mei_cl *cl = file->private_data;
	bool notify_ev;
	bool block = (file->f_flags & O_NONBLOCK) == 0;
	int rets;

	rets = mei_cl_notify_get(cl, block, &notify_ev);
	if (rets)
		return rets;

	*notify_get = notify_ev ? 1 : 0;
	return 0;
}
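
/*
 * Editor's note: a minimal user-space sketch of the notification flow the
 * two helpers above implement, kept here as an illustration only (hence the
 * "#if 0").  It assumes a client is already connected on @fd, that the
 * IOCTL_MEI_NOTIFY_SET/GET ioctls from <linux/mei.h> take a __u32, and that
 * writing 1 arms notifications (matching MEI_HBM_NOTIFICATION_START).
 */
#if 0	/* user-space example, not part of the driver */
#include <stdint.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

/* Arm notifications, wait for one, and report whether an event fired. */
static int mei_notify_example(int fd)
{
	uint32_t arm = 1, pending = 0;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	if (ioctl(fd, IOCTL_MEI_NOTIFY_SET, &arm) < 0)	/* enable events */
		return -1;
	if (poll(&pfd, 1, -1) <= 0)			/* mei_poll() raises POLLPRI */
		return -1;
	if (ioctl(fd, IOCTL_MEI_NOTIFY_GET, &pending) < 0)
		return -1;
	return pending;					/* 1 if an event was signaled */
}
#endif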

/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: user pointer to the ioctl argument data
 *
 * Return: 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data connect_data;
	u32 notify_get, notify_req;
	int rets;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(dev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	switch (cmd) {
	case IOCTL_MEI_CONNECT_CLIENT:
		dev_dbg(dev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");
		if (copy_from_user(&connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}

		rets = mei_ioctl_connect_client(file, &connect_data);
		if (rets)
			goto out;

		/* if all is ok, copying the data back to user. */
		if (copy_to_user((char __user *)data, &connect_data,
				sizeof(struct mei_connect_client_data))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;
		}

		break;

	case IOCTL_MEI_NOTIFY_SET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_SET.\n");
		if (copy_from_user(&notify_req,
				   (char __user *)data, sizeof(notify_req))) {
			dev_dbg(dev->dev, "failed to copy data from userland\n");
			rets = -EFAULT;
			goto out;
		}
		rets = mei_ioctl_client_notify_request(file, notify_req);
		break;

	case IOCTL_MEI_NOTIFY_GET:
		dev_dbg(dev->dev, ": IOCTL_MEI_NOTIFY_GET.\n");
		rets = mei_ioctl_client_notify_get(file, &notify_get);
		if (rets)
			goto out;

		dev_dbg(dev->dev, "copy connect data to user\n");
		if (copy_to_user((char __user *)data,
				&notify_get, sizeof(notify_get))) {
			dev_dbg(dev->dev, "failed to copy data to userland\n");
			rets = -EFAULT;
			goto out;

		}
		break;

	default:
		dev_err(dev->dev, ": unsupported ioctl %d.\n", cmd);
		rets = -ENOIOCTLCMD;
	}

out:
	mutex_unlock(&dev->device_lock);
	return rets;
}
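
/*
 * Editor's note: a minimal user-space sketch of the connect/write/read
 * sequence served by mei_ioctl(), mei_write() and mei_read() above,
 * compiled out with "#if 0" since it is an illustration only.  The device
 * node name (/dev/mei0) and the all-zero client UUID are placeholders;
 * substitute the 16-byte UUID of the firmware client you actually target.
 */
#if 0	/* user-space example, not part of the driver */
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/mei.h>

int main(void)
{
	static const unsigned char client_uuid[16] = { 0 }; /* placeholder */
	struct mei_connect_client_data conn;
	unsigned char req[] = { 0x01 };		/* hypothetical request payload */
	unsigned char resp[512];
	ssize_t n;
	int fd;

	fd = open("/dev/mei0", O_RDWR);		/* node for minor 0 assumed */
	if (fd < 0)
		return 1;

	memset(&conn, 0, sizeof(conn));
	memcpy(&conn.in_client_uuid, client_uuid, sizeof(client_uuid));
	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &conn) < 0)
		goto fail;

	/* conn.out_client_properties now holds max_msg_length etc. */
	n = write(fd, req, sizeof(req));	/* one write() == one MEI message */
	if (n < 0)
		goto fail;

	n = read(fd, resp, sizeof(resp));	/* blocks until a reply message */
	if (n < 0)
		goto fail;

	close(fd);
	return 0;
fail:
	close(fd);
	return 1;
}
#endif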

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: user pointer to the ioctl argument data
 *
 * Return: 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * Return: poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	unsigned long req_events = poll_requested_events(wait);
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;
	bool notify_en;

	if (WARN_ON(!cl || !cl->dev))
		return POLLERR;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	notify_en = cl->notify_en && (req_events & POLLPRI);

	if (dev->dev_state != MEI_DEV_ENABLED ||
	    !mei_cl_is_connected(cl)) {
		mask = POLLERR;
		goto out;
	}

	if (notify_en) {
		poll_wait(file, &cl->ev_wait, wait);
		if (cl->notify_ev)
			mask |= POLLPRI;
	}

	if (cl == &dev->iamthif_cl) {
		mask |= mei_amthif_poll(file, wait);
		goto out;
	}

	if (req_events & (POLLIN | POLLRDNORM)) {
		poll_wait(file, &cl->rx_wait, wait);

		if (!list_empty(&cl->rd_completed))
			mask |= POLLIN | POLLRDNORM;
		else
			mei_cl_read_start(cl, mei_cl_mtu(cl), file);
	}

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}
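
/*
 * Editor's note: a minimal user-space sketch of polling a connected mei
 * client, compiled out with "#if 0" as an illustration only.  Note that
 * mei_poll() above kicks off a read on the client's behalf when nothing is
 * queued, so POLLIN means a subsequent read() will not block; the fd and
 * buffer handling here are assumptions of the example.
 */
#if 0	/* user-space example, not part of the driver */
#include <poll.h>
#include <unistd.h>

/* Wait up to @timeout_ms for a completed read on a connected mei fd. */
static ssize_t mei_poll_read_example(int fd, void *buf, size_t len, int timeout_ms)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (poll(&pfd, 1, timeout_ms) <= 0)
		return -1;
	if (!(pfd.revents & POLLIN))
		return -1;
	return read(fd, buf, len);	/* data is queued, will not block */
}
#endif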

/**
 * mei_fasync - asynchronous io support
 *
 * @fd: file descriptor
 * @file: pointer to file structure
 * @band: band bitmap
 *
 * Return: negative on error,
 *         0 if it did no changes,
 *         and positive if a process was added or deleted
 */
static int mei_fasync(int fd, struct file *file, int band)
{

	struct mei_cl *cl = file->private_data;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	return fasync_helper(fd, file, band, &cl->ev_async);
}

/**
 * fw_status_show - mei device fw_status attribute show method
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t fw_status_show(struct device *device,
		struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct mei_fw_status fw_status;
	int err, i;
	ssize_t cnt = 0;

	mutex_lock(&dev->device_lock);
	err = mei_fw_status(dev, &fw_status);
	mutex_unlock(&dev->device_lock);
	if (err) {
		dev_err(device, "read fw_status error = %d\n", err);
		return err;
	}

	for (i = 0; i < fw_status.count; i++)
		cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
				fw_status.status[i]);
	return cnt;
}
static DEVICE_ATTR_RO(fw_status);

/**
 * hbm_ver_show - display HBM protocol version negotiated with FW
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_show(struct device *device,
			    struct device_attribute *attr, char *buf)
{
	struct mei_device *dev = dev_get_drvdata(device);
	struct hbm_version ver;

	mutex_lock(&dev->device_lock);
	ver = dev->version;
	mutex_unlock(&dev->device_lock);

	return sprintf(buf, "%u.%u\n", ver.major_version, ver.minor_version);
}
static DEVICE_ATTR_RO(hbm_ver);

/**
 * hbm_ver_drv_show - display HBM protocol version advertised by driver
 *
 * @device: device pointer
 * @attr: attribute pointer
 * @buf:  char out buffer
 *
 * Return: number of the bytes printed into buf or error
 */
static ssize_t hbm_ver_drv_show(struct device *device,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%u.%u\n", HBM_MAJOR_VERSION, HBM_MINOR_VERSION);
}
static DEVICE_ATTR_RO(hbm_ver_drv);

static struct attribute *mei_attrs[] = {
	&dev_attr_fw_status.attr,
	&dev_attr_hbm_ver.attr,
	&dev_attr_hbm_ver_drv.attr,
	NULL
};
ATTRIBUTE_GROUPS(mei);
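
/*
 * Editor's note: the attributes above are exposed through sysfs under the
 * "mei" class registered below.  A minimal user-space sketch of reading
 * them follows, compiled out with "#if 0"; the path assumes the first
 * registered device is named mei0 (per the "mei%d" template in
 * mei_register()).
 */
#if 0	/* user-space example, not part of the driver */
#include <stdio.h>

/* Print the firmware status words produced by fw_status_show() above. */
static void mei_print_fw_status(void)
{
	char line[16];
	FILE *f = fopen("/sys/class/mei/mei0/fw_status", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f))
		printf("fw status: %s", line);
	fclose(f);
}
#endif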

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.fasync = mei_fasync,
	.llseek = no_llseek
};

static struct class *mei_class;
static dev_t mei_devt;
#define MEI_MAX_DEVS  MINORMASK
static DEFINE_MUTEX(mei_minor_lock);
static DEFINE_IDR(mei_idr);

/**
 * mei_minor_get - obtain next free device minor number
 *
 * @dev:  device pointer
 *
 * Return: allocated minor, or -ENOSPC if no free minor left
 */
static int mei_minor_get(struct mei_device *dev)
{
	int ret;

	mutex_lock(&mei_minor_lock);
	ret = idr_alloc(&mei_idr, dev, 0, MEI_MAX_DEVS, GFP_KERNEL);
	if (ret >= 0)
		dev->minor = ret;
	else if (ret == -ENOSPC)
		dev_err(dev->dev, "too many mei devices\n");

	mutex_unlock(&mei_minor_lock);
	return ret;
}

/**
 * mei_minor_free - mark device minor number as free
 *
 * @dev:  device pointer
 */
static void mei_minor_free(struct mei_device *dev)
{
	mutex_lock(&mei_minor_lock);
	idr_remove(&mei_idr, dev->minor);
	mutex_unlock(&mei_minor_lock);
}

int mei_register(struct mei_device *dev, struct device *parent)
{
	struct device *clsdev; /* class device */
	int ret, devno;

	ret = mei_minor_get(dev);
	if (ret < 0)
		return ret;

	/* Fill in the data structures */
	devno = MKDEV(MAJOR(mei_devt), dev->minor);
	cdev_init(&dev->cdev, &mei_fops);
	dev->cdev.owner = parent->driver->owner;

	/* Add the device */
	ret = cdev_add(&dev->cdev, devno, 1);
	if (ret) {
		dev_err(parent, "unable to add device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		goto err_dev_add;
	}

	clsdev = device_create_with_groups(mei_class, parent, devno,
					   dev, mei_groups,
					   "mei%d", dev->minor);

	if (IS_ERR(clsdev)) {
		dev_err(parent, "unable to create device %d:%d\n",
			MAJOR(mei_devt), dev->minor);
		ret = PTR_ERR(clsdev);
		goto err_dev_create;
	}

	ret = mei_dbgfs_register(dev, dev_name(clsdev));
	if (ret) {
		dev_err(clsdev, "cannot register debugfs ret = %d\n", ret);
		goto err_dev_dbgfs;
	}

	return 0;

err_dev_dbgfs:
	device_destroy(mei_class, devno);
err_dev_create:
	cdev_del(&dev->cdev);
err_dev_add:
	mei_minor_free(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(mei_register);

void mei_deregister(struct mei_device *dev)
{
	int devno;

	devno = dev->cdev.dev;
	cdev_del(&dev->cdev);

	mei_dbgfs_deregister(dev);

	device_destroy(mei_class, devno);

	mei_minor_free(dev);
}
EXPORT_SYMBOL_GPL(mei_deregister);

static int __init mei_init(void)
{
	int ret;

	mei_class = class_create(THIS_MODULE, "mei");
	if (IS_ERR(mei_class)) {
		pr_err("couldn't create class\n");
		ret = PTR_ERR(mei_class);
		goto err;
	}

	ret = alloc_chrdev_region(&mei_devt, 0, MEI_MAX_DEVS, "mei");
	if (ret < 0) {
		pr_err("unable to allocate char dev region\n");
		goto err_class;
	}

	ret = mei_cl_bus_init();
	if (ret < 0) {
		pr_err("unable to initialize bus\n");
		goto err_chrdev;
	}

	return 0;

err_chrdev:
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
err_class:
	class_destroy(mei_class);
err:
	return ret;
}

static void __exit mei_exit(void)
{
	unregister_chrdev_region(mei_devt, MEI_MAX_DEVS);
	class_destroy(mei_class);
	mei_cl_bus_exit();
}

module_init(mei_init);
module_exit(mei_exit);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");