1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2012-2023, Intel Corporation. All rights reserved.
4 * Intel Management Engine Interface (Intel MEI) Linux driver
5 */
6
7#include <linux/module.h>
8#include <linux/device.h>
9#include <linux/kernel.h>
10#include <linux/sched/signal.h>
11#include <linux/init.h>
12#include <linux/errno.h>
13#include <linux/slab.h>
14#include <linux/mutex.h>
15#include <linux/interrupt.h>
16#include <linux/scatterlist.h>
17#include <linux/mei_cl_bus.h>
18
19#include "mei_dev.h"
20#include "client.h"
21
22#define to_mei_cl_driver(d) container_of_const(d, struct mei_cl_driver, driver)
23
24/**
25 * __mei_cl_send - internal client send (write)
26 *
27 * @cl: host client
28 * @buf: buffer to send
29 * @length: buffer length
30 * @vtag: virtual tag
31 * @mode: sending mode
32 *
33 * Return: written size in bytes or < 0 on error
34 */
35ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
36 unsigned int mode)
37{
38 return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
39}
40
41/**
42 * __mei_cl_send_timeout - internal client send (write)
43 *
44 * @cl: host client
45 * @buf: buffer to send
46 * @length: buffer length
47 * @vtag: virtual tag
48 * @mode: sending mode
49 * @timeout: send timeout in milliseconds.
50 * effective only for blocking writes, i.e. when the MEI_CL_IO_TX_BLOCKING mode bit is set.
51 * set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
52 *
53 * Return: written size in bytes or < 0 on error
54 */
55ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
56 unsigned int mode, unsigned long timeout)
57{
58 struct mei_device *bus;
59 struct mei_cl_cb *cb;
60 ssize_t rets;
61
62 if (WARN_ON(!cl || !cl->dev))
63 return -ENODEV;
64
65 bus = cl->dev;
66
67 mutex_lock(&bus->device_lock);
68 if (bus->dev_state != MEI_DEV_ENABLED &&
69 bus->dev_state != MEI_DEV_POWERING_DOWN) {
70 rets = -ENODEV;
71 goto out;
72 }
73
74 if (!mei_cl_is_connected(cl)) {
75 rets = -ENODEV;
76 goto out;
77 }
78
79 /* Check if we have an ME client device */
80 if (!mei_me_cl_is_active(cl->me_cl)) {
81 rets = -ENOTTY;
82 goto out;
83 }
84
85 if (vtag) {
86 /* Check if vtag is supported by client */
87 rets = mei_cl_vt_support_check(cl);
88 if (rets)
89 goto out;
90 }
91
92 if (length > mei_cl_mtu(cl)) {
93 rets = -EFBIG;
94 goto out;
95 }
96
97 while (cl->tx_cb_queued >= bus->tx_queue_limit) {
98 mutex_unlock(&bus->device_lock);
99 rets = wait_event_interruptible(cl->tx_wait,
100 cl->writing_state == MEI_WRITE_COMPLETE ||
101 (!mei_cl_is_connected(cl)));
102 mutex_lock(&bus->device_lock);
103 if (rets) {
104 if (signal_pending(current))
105 rets = -EINTR;
106 goto out;
107 }
108 if (!mei_cl_is_connected(cl)) {
109 rets = -ENODEV;
110 goto out;
111 }
112 }
113
114 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
115 if (!cb) {
116 rets = -ENOMEM;
117 goto out;
118 }
119 cb->vtag = vtag;
120
121 cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
122 cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
123 memcpy(cb->buf.data, buf, length);
124 /* hack we point data to header */
125 if (mode & MEI_CL_IO_SGL) {
126 cb->ext_hdr = (struct mei_ext_hdr *)cb->buf.data;
127 cb->buf.data = NULL;
128 cb->buf.size = 0;
129 }
130
131 rets = mei_cl_write(cl, cb, timeout);
132
133 if (mode & MEI_CL_IO_SGL && rets == 0)
134 rets = length;
135
136out:
137 mutex_unlock(&bus->device_lock);
138
139 return rets;
140}
141
142/**
143 * __mei_cl_recv - internal client receive (read)
144 *
145 * @cl: host client
146 * @buf: buffer to receive
147 * @length: buffer length
148 * @vtag: virtual tag
149 * @mode: io mode
150 * @timeout: recv timeout, 0 for infinite timeout
151 *
152 * Return: read size in bytes or < 0 on error
153 */
154ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
155 unsigned int mode, unsigned long timeout)
156{
157 struct mei_device *bus;
158 struct mei_cl_cb *cb;
159 size_t r_length;
160 ssize_t rets;
161 bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
162
163 if (WARN_ON(!cl || !cl->dev))
164 return -ENODEV;
165
166 bus = cl->dev;
167
168 mutex_lock(&bus->device_lock);
169 if (bus->dev_state != MEI_DEV_ENABLED &&
170 bus->dev_state != MEI_DEV_POWERING_DOWN) {
171 rets = -ENODEV;
172 goto out;
173 }
174
175 cb = mei_cl_read_cb(cl, NULL);
176 if (cb)
177 goto copy;
178
179 rets = mei_cl_read_start(cl, length, NULL);
180 if (rets && rets != -EBUSY)
181 goto out;
182
183 if (nonblock) {
184 rets = -EAGAIN;
185 goto out;
186 }
187
188 /* wait on event only if there is no other waiter */
189 /* synchronized under device mutex */
190 if (!waitqueue_active(&cl->rx_wait)) {
191
192 mutex_unlock(&bus->device_lock);
193
194 if (timeout) {
195 rets = wait_event_interruptible_timeout
196 (cl->rx_wait,
197 mei_cl_read_cb(cl, NULL) ||
198 (!mei_cl_is_connected(cl)),
199 msecs_to_jiffies(timeout));
200 if (rets == 0)
201 return -ETIME;
202 if (rets < 0) {
203 if (signal_pending(current))
204 return -EINTR;
205 return -ERESTARTSYS;
206 }
207 } else {
208 if (wait_event_interruptible
209 (cl->rx_wait,
210 mei_cl_read_cb(cl, NULL) ||
211 (!mei_cl_is_connected(cl)))) {
212 if (signal_pending(current))
213 return -EINTR;
214 return -ERESTARTSYS;
215 }
216 }
217
218 mutex_lock(&bus->device_lock);
219
220 if (!mei_cl_is_connected(cl)) {
221 rets = -ENODEV;
222 goto out;
223 }
224 }
225
226 cb = mei_cl_read_cb(cl, NULL);
227 if (!cb) {
228 rets = 0;
229 goto out;
230 }
231
232copy:
233 if (cb->status) {
234 rets = cb->status;
235 goto free;
236 }
237
238 /* for the GSC type - copy the extended header to the buffer */
239 if (cb->ext_hdr && cb->ext_hdr->type == MEI_EXT_HDR_GSC) {
240 r_length = min_t(size_t, length, cb->ext_hdr->length * sizeof(u32));
241 memcpy(buf, cb->ext_hdr, r_length);
242 } else {
243 r_length = min_t(size_t, length, cb->buf_idx);
244 memcpy(buf, cb->buf.data, r_length);
245 }
246 rets = r_length;
247
248 if (vtag)
249 *vtag = cb->vtag;
250
251free:
252 mei_cl_del_rd_completed(cl, cb);
253out:
254 mutex_unlock(&bus->device_lock);
255
256 return rets;
257}
258
259/**
260 * mei_cldev_send_vtag - me device send with vtag (write)
261 *
262 * @cldev: me client device
263 * @buf: buffer to send
264 * @length: buffer length
265 * @vtag: virtual tag
266 *
267 * Return:
268 * * written size in bytes
269 * * < 0 on error
270 */
271
272ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
273 size_t length, u8 vtag)
274{
275 struct mei_cl *cl = cldev->cl;
276
277 return __mei_cl_send(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING);
278}
279EXPORT_SYMBOL_GPL(mei_cldev_send_vtag);
280
281/**
282 * mei_cldev_send_vtag_timeout - me device send with vtag and timeout (write)
283 *
284 * @cldev: me client device
285 * @buf: buffer to send
286 * @length: buffer length
287 * @vtag: virtual tag
288 * @timeout: send timeout in milliseconds, 0 for infinite timeout
289 *
290 * Return:
291 * * written size in bytes
292 * * < 0 on error
293 */
294
295ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
296 size_t length, u8 vtag, unsigned long timeout)
297{
298 struct mei_cl *cl = cldev->cl;
299
300 return __mei_cl_send_timeout(cl, buf, length, vtag, MEI_CL_IO_TX_BLOCKING, timeout);
301}
302EXPORT_SYMBOL_GPL(mei_cldev_send_vtag_timeout);
303
304/**
305 * mei_cldev_recv_vtag - client receive with vtag (read)
306 *
307 * @cldev: me client device
308 * @buf: buffer to receive
309 * @length: buffer length
310 * @vtag: virtual tag
311 *
312 * Return:
313 * * read size in bytes
314 * * < 0 on error
315 */
316
317ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
318 u8 *vtag)
319{
320 struct mei_cl *cl = cldev->cl;
321
322 return __mei_cl_recv(cl, buf, length, vtag, 0, 0);
323}
324EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag);
325
326/**
327 * mei_cldev_recv_nonblock_vtag - non block client receive with vtag (read)
328 *
329 * @cldev: me client device
330 * @buf: buffer to receive
331 * @length: buffer length
332 * @vtag: virtual tag
333 *
334 * Return:
335 * * read size in bytes
336 * * -EAGAIN if the call would block.
337 * * < 0 on other error
338 */
339ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
340 size_t length, u8 *vtag)
341{
342 struct mei_cl *cl = cldev->cl;
343
344 return __mei_cl_recv(cl, buf, length, vtag, MEI_CL_IO_RX_NONBLOCK, 0);
345}
346EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock_vtag);
347
348/**
349 * mei_cldev_recv_timeout - client receive with timeout (read)
350 *
351 * @cldev: me client device
352 * @buf: buffer to receive
353 * @length: buffer length
354 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
355 *
356 * Return:
357 * * read size in bytes
358 * * < 0 on error
359 */
360ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
361 unsigned long timeout)
362{
363 return mei_cldev_recv_vtag_timeout(cldev, buf, length, NULL, timeout);
364}
365EXPORT_SYMBOL_GPL(mei_cldev_recv_timeout);
366
367/**
368 * mei_cldev_recv_vtag_timeout - client receive with vtag (read)
369 *
370 * @cldev: me client device
371 * @buf: buffer to receive
372 * @length: buffer length
373 * @vtag: virtual tag
374 * @timeout: recv timeout in milliseconds, 0 for infinite timeout
375 *
376 * Return:
377 * * read size in bytes
378 * * < 0 on error
379 */
380
381ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
382 u8 *vtag, unsigned long timeout)
383{
384 struct mei_cl *cl = cldev->cl;
385
386 return __mei_cl_recv(cl, buf, length, vtag, 0, timeout);
387}
388EXPORT_SYMBOL_GPL(mei_cldev_recv_vtag_timeout);
389
390/**
391 * mei_cldev_send - me device send (write)
392 *
393 * @cldev: me client device
394 * @buf: buffer to send
395 * @length: buffer length
396 *
397 * Return:
398 * * written size in bytes
399 * * < 0 on error
400 */
401ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf, size_t length)
402{
403 return mei_cldev_send_vtag(cldev, buf, length, 0);
404}
405EXPORT_SYMBOL_GPL(mei_cldev_send);
406
407/**
408 * mei_cldev_send_timeout - me device send with timeout (write)
409 *
410 * @cldev: me client device
411 * @buf: buffer to send
412 * @length: buffer length
413 * @timeout: send timeout in milliseconds, 0 for infinite timeout
414 *
415 * Return:
416 * * written size in bytes
417 * * < 0 on error
418 */
419ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf, size_t length,
420 unsigned long timeout)
421{
422 return mei_cldev_send_vtag_timeout(cldev, buf, length, 0, timeout);
423}
424EXPORT_SYMBOL_GPL(mei_cldev_send_timeout);
425
426/**
427 * mei_cldev_recv - client receive (read)
428 *
429 * @cldev: me client device
430 * @buf: buffer to receive
431 * @length: buffer length
432 *
433 * Return: read size in bytes or < 0 on error
434 */
435ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
436{
437 return mei_cldev_recv_vtag(cldev, buf, length, NULL);
438}
439EXPORT_SYMBOL_GPL(mei_cldev_recv);
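
/*
 * Usage sketch (illustrative only, not part of this driver): a hypothetical
 * MEI bus client driver could perform a blocking request/response exchange
 * on its connection with the send/recv wrappers above:
 *
 *	static int example_txrx(struct mei_cl_device *cldev)
 *	{
 *		const u8 req[] = { 0x01, 0x00, 0x00, 0x00 };
 *		u8 rsp[64];
 *		ssize_t ret;
 *
 *		ret = mei_cldev_send(cldev, req, sizeof(req));
 *		if (ret < 0)
 *			return ret;
 *
 *		ret = mei_cldev_recv(cldev, rsp, sizeof(rsp));
 *		if (ret < 0)
 *			return ret;
 *
 *		return 0;
 *	}
 */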
440
441/**
442 * mei_cldev_recv_nonblock - non block client receive (read)
443 *
444 * @cldev: me client device
445 * @buf: buffer to receive
446 * @length: buffer length
447 *
448 * Return: read size in bytes or < 0 on error
449 * -EAGAIN if the call would block.
450 */
451ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
452 size_t length)
453{
454 return mei_cldev_recv_nonblock_vtag(cldev, buf, length, NULL);
455}
456EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
457
458/**
459 * mei_cl_bus_rx_work - dispatch rx event for a bus device
460 *
461 * @work: work
462 */
463static void mei_cl_bus_rx_work(struct work_struct *work)
464{
465 struct mei_cl_device *cldev;
466 struct mei_device *bus;
467
468 cldev = container_of(work, struct mei_cl_device, rx_work);
469
470 bus = cldev->bus;
471
472 if (cldev->rx_cb)
473 cldev->rx_cb(cldev);
474
475 mutex_lock(&bus->device_lock);
476 if (mei_cl_is_connected(cldev->cl))
477 mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
478 mutex_unlock(&bus->device_lock);
479}
480
481/**
482 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
483 *
484 * @work: work
485 */
486static void mei_cl_bus_notif_work(struct work_struct *work)
487{
488 struct mei_cl_device *cldev;
489
490 cldev = container_of(work, struct mei_cl_device, notif_work);
491
492 if (cldev->notif_cb)
493 cldev->notif_cb(cldev);
494}
495
496/**
497 * mei_cl_bus_notify_event - schedule notify cb on bus client
498 *
499 * @cl: host client
500 *
501 * Return: true if event was scheduled
502 * false if the client is not waiting for event
503 */
504bool mei_cl_bus_notify_event(struct mei_cl *cl)
505{
506 struct mei_cl_device *cldev = cl->cldev;
507
508 if (!cldev || !cldev->notif_cb)
509 return false;
510
511 if (!cl->notify_ev)
512 return false;
513
514 schedule_work(&cldev->notif_work);
515
516 cl->notify_ev = false;
517
518 return true;
519}
520
521/**
522 * mei_cl_bus_rx_event - schedule rx event
523 *
524 * @cl: host client
525 *
526 * Return: true if event was scheduled
527 * false if the client is not waiting for event
528 */
529bool mei_cl_bus_rx_event(struct mei_cl *cl)
530{
531 struct mei_cl_device *cldev = cl->cldev;
532
533 if (!cldev || !cldev->rx_cb)
534 return false;
535
536 schedule_work(&cldev->rx_work);
537
538 return true;
539}
540
541/**
542 * mei_cldev_register_rx_cb - register Rx event callback
543 *
544 * @cldev: me client device
545 * @rx_cb: callback function
546 *
547 * Return: 0 on success
548 * -EALREADY if a callback is already registered
549 * <0 on other errors
550 */
551int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
552{
553 struct mei_device *bus = cldev->bus;
554 int ret;
555
556 if (!rx_cb)
557 return -EINVAL;
558 if (cldev->rx_cb)
559 return -EALREADY;
560
561 cldev->rx_cb = rx_cb;
562 INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
563
564 mutex_lock(&bus->device_lock);
565 if (mei_cl_is_connected(cldev->cl))
566 ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
567 else
568 ret = -ENODEV;
569 mutex_unlock(&bus->device_lock);
570 if (ret && ret != -EBUSY) {
571 cancel_work_sync(&cldev->rx_work);
572 cldev->rx_cb = NULL;
573 return ret;
574 }
575
576 return 0;
577}
578EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
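
/*
 * Usage sketch (illustrative only): a hypothetical client driver would
 * typically register its Rx callback from the probe routine after
 * mei_cldev_enable(); the callback then runs from the bus Rx work item
 * whenever data for the client has been received:
 *
 *	static void example_rx(struct mei_cl_device *cldev)
 *	{
 *		u8 buf[128];
 *		ssize_t len;
 *
 *		len = mei_cldev_recv_nonblock(cldev, buf, sizeof(buf));
 *		if (len > 0)
 *			example_consume(buf, len);
 *	}
 *
 * where example_consume() stands for whatever the driver does with the
 * received data, and the registration itself would be
 * mei_cldev_register_rx_cb(cldev, example_rx) from probe().
 */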
579
580/**
581 * mei_cldev_register_notif_cb - register FW notification event callback
582 *
583 * @cldev: me client device
584 * @notif_cb: callback function
585 *
586 * Return: 0 on success
587 * -EALREADY if a callback is already registered
588 * <0 on other errors
589 */
590int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
591 mei_cldev_cb_t notif_cb)
592{
593 struct mei_device *bus = cldev->bus;
594 int ret;
595
596 if (!notif_cb)
597 return -EINVAL;
598
599 if (cldev->notif_cb)
600 return -EALREADY;
601
602 cldev->notif_cb = notif_cb;
603 INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
604
605 mutex_lock(&bus->device_lock);
606 ret = mei_cl_notify_request(cldev->cl, NULL, 1);
607 mutex_unlock(&bus->device_lock);
608 if (ret) {
609 cancel_work_sync(&cldev->notif_work);
610 cldev->notif_cb = NULL;
611 return ret;
612 }
613
614 return 0;
615}
616EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
617
618/**
619 * mei_cldev_get_drvdata - driver data getter
620 *
621 * @cldev: mei client device
622 *
623 * Return: driver private data
624 */
625void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
626{
627 return dev_get_drvdata(&cldev->dev);
628}
629EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
630
631/**
632 * mei_cldev_set_drvdata - driver data setter
633 *
634 * @cldev: mei client device
635 * @data: data to store
636 */
637void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
638{
639 dev_set_drvdata(&cldev->dev, data);
640}
641EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
642
643/**
644 * mei_cldev_uuid - return uuid of the underlying me client
645 *
646 * @cldev: mei client device
647 *
648 * Return: me client uuid
649 */
650const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
651{
652 return mei_me_cl_uuid(cldev->me_cl);
653}
654EXPORT_SYMBOL_GPL(mei_cldev_uuid);
655
656/**
657 * mei_cldev_ver - return protocol version of the underlying me client
658 *
659 * @cldev: mei client device
660 *
661 * Return: me client protocol version
662 */
663u8 mei_cldev_ver(const struct mei_cl_device *cldev)
664{
665 return mei_me_cl_ver(cldev->me_cl);
666}
667EXPORT_SYMBOL_GPL(mei_cldev_ver);
668
669/**
670 * mei_cldev_enabled - check whether the device is enabled
671 *
672 * @cldev: mei client device
673 *
674 * Return: true if me client is initialized and connected
675 */
676bool mei_cldev_enabled(const struct mei_cl_device *cldev)
677{
678 return mei_cl_is_connected(cldev->cl);
679}
680EXPORT_SYMBOL_GPL(mei_cldev_enabled);
681
682/**
683 * mei_cl_bus_module_get - acquire module of the underlying
684 * hw driver.
685 *
686 * @cldev: mei client device
687 *
688 * Return: true on success; false if the module was removed.
689 */
690static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
691{
692 return try_module_get(cldev->bus->dev->driver->owner);
693}
694
695/**
696 * mei_cl_bus_module_put - release the underlying hw module.
697 *
698 * @cldev: mei client device
699 */
700static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
701{
702 module_put(cldev->bus->dev->driver->owner);
703}
704
705/**
706 * mei_cl_bus_vtag - get bus vtag entry wrapper
707 * The vtag entry for the bus client is always first.
708 *
709 * @cl: host client
710 *
711 * Return: bus vtag or NULL
712 */
713static inline struct mei_cl_vtag *mei_cl_bus_vtag(struct mei_cl *cl)
714{
715 return list_first_entry_or_null(&cl->vtag_map,
716 struct mei_cl_vtag, list);
717}
718
719/**
720 * mei_cl_bus_vtag_alloc - add bus client entry to vtag map
721 *
722 * @cldev: me client device
723 *
724 * Return:
725 * * 0 on success
726 * * -ENOMEM if memory allocation failed
727 */
728static int mei_cl_bus_vtag_alloc(struct mei_cl_device *cldev)
729{
730 struct mei_cl *cl = cldev->cl;
731 struct mei_cl_vtag *cl_vtag;
732
733 /*
734 * Bail out if the client does not support vtags
735 * or has already allocated one
736 */
737 if (mei_cl_vt_support_check(cl) || mei_cl_bus_vtag(cl))
738 return 0;
739
740 cl_vtag = mei_cl_vtag_alloc(NULL, 0);
741 if (IS_ERR(cl_vtag))
742 return -ENOMEM;
743
744 list_add_tail(&cl_vtag->list, &cl->vtag_map);
745
746 return 0;
747}
748
749/**
750 * mei_cl_bus_vtag_free - remove the bus entry from vtag map
751 *
752 * @cldev: me client device
753 */
754static void mei_cl_bus_vtag_free(struct mei_cl_device *cldev)
755{
756 struct mei_cl *cl = cldev->cl;
757 struct mei_cl_vtag *cl_vtag;
758
759 cl_vtag = mei_cl_bus_vtag(cl);
760 if (!cl_vtag)
761 return;
762
763 list_del(&cl_vtag->list);
764 kfree(cl_vtag);
765}
766
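/**
 * mei_cldev_dma_map - allocate and map a DMA buffer for the client
 *
 * @cldev: me client device
 * @buffer_id: firmware buffer id, must be non-zero
 * @size: size of the requested buffer, must be a multiple of MEI_FW_PAGE_SIZE
 *
 * Allocate a client DMA buffer, advertise it to the firmware under
 * @buffer_id, and link the client to the bus if it is not linked yet.
 *
 * Return: virtual address of the mapped buffer on success, ERR_PTR on error
 */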
767void *mei_cldev_dma_map(struct mei_cl_device *cldev, u8 buffer_id, size_t size)
768{
769 struct mei_device *bus;
770 struct mei_cl *cl;
771 int ret;
772
773 if (!cldev || !buffer_id || !size)
774 return ERR_PTR(-EINVAL);
775
776 if (!IS_ALIGNED(size, MEI_FW_PAGE_SIZE)) {
777 dev_err(&cldev->dev, "Map size should be aligned to %lu\n",
778 MEI_FW_PAGE_SIZE);
779 return ERR_PTR(-EINVAL);
780 }
781
782 cl = cldev->cl;
783 bus = cldev->bus;
784
785 mutex_lock(&bus->device_lock);
786 if (cl->state == MEI_FILE_UNINITIALIZED) {
787 ret = mei_cl_link(cl);
788 if (ret)
789 goto notlinked;
790 /* update pointers */
791 cl->cldev = cldev;
792 }
793
794 ret = mei_cl_dma_alloc_and_map(cl, NULL, buffer_id, size);
795 if (ret)
796 mei_cl_unlink(cl);
797notlinked:
798 mutex_unlock(&bus->device_lock);
799 if (ret)
800 return ERR_PTR(ret);
801 return cl->dma.vaddr;
802}
803EXPORT_SYMBOL_GPL(mei_cldev_dma_map);
804
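/**
 * mei_cldev_dma_unmap - unmap the client DMA buffer
 *
 * @cldev: me client device
 *
 * Unmap and free the buffer previously set up by mei_cldev_dma_map(),
 * flush the client queues and unlink the client from the bus.
 *
 * Return: 0 on success and < 0 on error
 */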
805int mei_cldev_dma_unmap(struct mei_cl_device *cldev)
806{
807 struct mei_device *bus;
808 struct mei_cl *cl;
809 int ret;
810
811 if (!cldev)
812 return -EINVAL;
813
814 cl = cldev->cl;
815 bus = cldev->bus;
816
817 mutex_lock(&bus->device_lock);
818 ret = mei_cl_dma_unmap(cl, NULL);
819
820 mei_cl_flush_queues(cl, NULL);
821 mei_cl_unlink(cl);
822 mutex_unlock(&bus->device_lock);
823 return ret;
824}
825EXPORT_SYMBOL_GPL(mei_cldev_dma_unmap);
826
827/**
828 * mei_cldev_enable - enable me client device
829 * create connection with me client
830 *
831 * @cldev: me client device
832 *
833 * Return: 0 on success and < 0 on error
834 */
835int mei_cldev_enable(struct mei_cl_device *cldev)
836{
837 struct mei_device *bus = cldev->bus;
838 struct mei_cl *cl;
839 int ret;
840
841 cl = cldev->cl;
842
843 mutex_lock(&bus->device_lock);
844 if (cl->state == MEI_FILE_UNINITIALIZED) {
845 ret = mei_cl_link(cl);
846 if (ret)
847 goto notlinked;
848 /* update pointers */
849 cl->cldev = cldev;
850 }
851
852 if (mei_cl_is_connected(cl)) {
853 ret = 0;
854 goto out;
855 }
856
857 if (!mei_me_cl_is_active(cldev->me_cl)) {
858 dev_err(&cldev->dev, "me client is not active\n");
859 ret = -ENOTTY;
860 goto out;
861 }
862
863 ret = mei_cl_bus_vtag_alloc(cldev);
864 if (ret)
865 goto out;
866
867 ret = mei_cl_connect(cl, cldev->me_cl, NULL);
868 if (ret < 0) {
869 dev_err(&cldev->dev, "cannot connect\n");
870 mei_cl_bus_vtag_free(cldev);
871 }
872
873out:
874 if (ret)
875 mei_cl_unlink(cl);
876notlinked:
877 mutex_unlock(&bus->device_lock);
878
879 return ret;
880}
881EXPORT_SYMBOL_GPL(mei_cldev_enable);
882
883/**
884 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
885 * callbacks.
886 *
887 * @cldev: client device
888 */
889static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
890{
891 if (cldev->rx_cb) {
892 cancel_work_sync(&cldev->rx_work);
893 cldev->rx_cb = NULL;
894 }
895
896 if (cldev->notif_cb) {
897 cancel_work_sync(&cldev->notif_work);
898 cldev->notif_cb = NULL;
899 }
900}
901
902/**
903 * mei_cldev_disable - disable me client device
904 * disconnect from the me client
905 *
906 * @cldev: me client device
907 *
908 * Return: 0 on success and < 0 on error
909 */
910int mei_cldev_disable(struct mei_cl_device *cldev)
911{
912 struct mei_device *bus;
913 struct mei_cl *cl;
914 int err;
915
916 if (!cldev)
917 return -ENODEV;
918
919 cl = cldev->cl;
920
921 bus = cldev->bus;
922
923 mei_cldev_unregister_callbacks(cldev);
924
925 mutex_lock(&bus->device_lock);
926
927 mei_cl_bus_vtag_free(cldev);
928
929 if (!mei_cl_is_connected(cl)) {
930 dev_dbg(bus->dev, "Already disconnected\n");
931 err = 0;
932 goto out;
933 }
934
935 err = mei_cl_disconnect(cl);
936 if (err < 0)
937 dev_err(bus->dev, "Could not disconnect from the ME client\n");
938
939out:
940 /* Flush queues and remove any pending read unless we have mapped DMA */
941 if (!cl->dma_mapped) {
942 mei_cl_flush_queues(cl, NULL);
943 mei_cl_unlink(cl);
944 }
945
946 mutex_unlock(&bus->device_lock);
947 return err;
948}
949EXPORT_SYMBOL_GPL(mei_cldev_disable);
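
/*
 * Usage sketch (illustrative only): a hypothetical bus client driver would
 * normally pair mei_cldev_enable() and mei_cldev_disable() in its probe and
 * remove callbacks:
 *
 *	static int example_probe(struct mei_cl_device *cldev,
 *				 const struct mei_cl_device_id *id)
 *	{
 *		return mei_cldev_enable(cldev);
 *	}
 *
 *	static void example_remove(struct mei_cl_device *cldev)
 *	{
 *		mei_cldev_disable(cldev);
 *	}
 */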
950
951/**
952 * mei_cldev_send_gsc_command - sends a gsc command, by sending
953 * a gsc mei message to gsc and receiving a reply from gsc
954 *
955 * @cldev: me client device
956 * @client_id: client id to send the command to
957 * @fence_id: fence id to send with the command
958 * @sg_in: scatter gather list containing addresses for rx message buffer
959 * @total_in_len: total length of data in 'in' sg, can be less than the sum of the buffer sizes
960 * @sg_out: scatter gather list containing addresses for tx message buffer
961 *
962 * Return:
963 * * written size in bytes
964 * * < 0 on error
965 */
966ssize_t mei_cldev_send_gsc_command(struct mei_cl_device *cldev,
967 u8 client_id, u32 fence_id,
968 struct scatterlist *sg_in,
969 size_t total_in_len,
970 struct scatterlist *sg_out)
971{
972 struct mei_cl *cl;
973 struct mei_device *bus;
974 ssize_t ret = 0;
975
976 struct mei_ext_hdr_gsc_h2f *ext_hdr;
977 size_t buf_sz = sizeof(struct mei_ext_hdr_gsc_h2f);
978 int sg_out_nents, sg_in_nents;
979 int i;
980 struct scatterlist *sg;
981 struct mei_ext_hdr_gsc_f2h rx_msg;
982 unsigned int sg_len;
983
984 if (!cldev || !sg_in || !sg_out)
985 return -EINVAL;
986
987 cl = cldev->cl;
988 bus = cldev->bus;
989
990 dev_dbg(bus->dev, "client_id %u, fence_id %u\n", client_id, fence_id);
991
992 if (!bus->hbm_f_gsc_supported)
993 return -EOPNOTSUPP;
994
995 sg_out_nents = sg_nents(sg_out);
996 sg_in_nents = sg_nents(sg_in);
997 /* at least one entry in tx and rx sgls must be present */
998 if (sg_out_nents <= 0 || sg_in_nents <= 0)
999 return -EINVAL;
1000
1001 buf_sz += (sg_out_nents + sg_in_nents) * sizeof(struct mei_gsc_sgl);
1002 ext_hdr = kzalloc(buf_sz, GFP_KERNEL);
1003 if (!ext_hdr)
1004 return -ENOMEM;
1005
1006 /* construct the GSC message */
1007 ext_hdr->hdr.type = MEI_EXT_HDR_GSC;
1008 ext_hdr->hdr.length = buf_sz / sizeof(u32); /* length is in dw */
1009
1010 ext_hdr->client_id = client_id;
1011 ext_hdr->addr_type = GSC_ADDRESS_TYPE_PHYSICAL_SGL;
1012 ext_hdr->fence_id = fence_id;
1013 ext_hdr->input_address_count = sg_in_nents;
1014 ext_hdr->output_address_count = sg_out_nents;
1015 ext_hdr->reserved[0] = 0;
1016 ext_hdr->reserved[1] = 0;
1017
1018 /* copy in-sgl to the message */
1019 for (i = 0, sg = sg_in; i < sg_in_nents; i++, sg++) {
1020 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
1021 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
1022 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
1023 ext_hdr->sgl[i].length = (sg_len <= total_in_len) ? sg_len : total_in_len;
1024 total_in_len -= ext_hdr->sgl[i].length;
1025 }
1026
1027 /* copy out-sgl to the message */
1028 for (i = sg_in_nents, sg = sg_out; i < sg_in_nents + sg_out_nents; i++, sg++) {
1029 ext_hdr->sgl[i].low = lower_32_bits(sg_dma_address(sg));
1030 ext_hdr->sgl[i].high = upper_32_bits(sg_dma_address(sg));
1031 sg_len = min_t(unsigned int, sg_dma_len(sg), PAGE_SIZE);
1032 ext_hdr->sgl[i].length = sg_len;
1033 }
1034
1035 /* send the message to GSC */
1036 ret = __mei_cl_send(cl, (u8 *)ext_hdr, buf_sz, 0, MEI_CL_IO_SGL);
1037 if (ret < 0) {
1038 dev_err(bus->dev, "__mei_cl_send failed, returned %zd\n", ret);
1039 goto end;
1040 }
1041 if (ret != buf_sz) {
1042 dev_err(bus->dev, "__mei_cl_send returned %zd instead of expected %zd\n",
1043 ret, buf_sz);
1044 ret = -EIO;
1045 goto end;
1046 }
1047
1048 /* receive the reply from GSC, note that at this point sg_in should contain the reply */
1049 ret = __mei_cl_recv(cl, (u8 *)&rx_msg, sizeof(rx_msg), NULL, MEI_CL_IO_SGL, 0);
1050
1051 if (ret != sizeof(rx_msg)) {
1052 dev_err(bus->dev, "__mei_cl_recv returned %zd instead of expected %zd\n",
1053 ret, sizeof(rx_msg));
1054 if (ret >= 0)
1055 ret = -EIO;
1056 goto end;
1057 }
1058
1059 /* check rx_msg.client_id and rx_msg.fence_id match the ones we send */
1060 if (rx_msg.client_id != client_id || rx_msg.fence_id != fence_id) {
1061 dev_err(bus->dev, "received client_id/fence_id %u/%u instead of %u/%u sent\n",
1062 rx_msg.client_id, rx_msg.fence_id, client_id, fence_id);
1063 ret = -EFAULT;
1064 goto end;
1065 }
1066
1067 dev_dbg(bus->dev, "gsc command: successfully written %u bytes\n", rx_msg.written);
1068 ret = rx_msg.written;
1069
1070end:
1071 kfree(ext_hdr);
1072 return ret;
1073}
1074EXPORT_SYMBOL_GPL(mei_cldev_send_gsc_command);
1075
1076/**
1077 * mei_cl_device_find - find matching entry in the driver id table
1078 *
1079 * @cldev: me client device
1080 * @cldrv: me client driver
1081 *
1082 * Return: id on success; NULL if no id is matching
1083 */
1084static const
1085struct mei_cl_device_id *mei_cl_device_find(const struct mei_cl_device *cldev,
1086 const struct mei_cl_driver *cldrv)
1087{
1088 const struct mei_cl_device_id *id;
1089 const uuid_le *uuid;
1090 u8 version;
1091 bool match;
1092
1093 uuid = mei_me_cl_uuid(cldev->me_cl);
1094 version = mei_me_cl_ver(cldev->me_cl);
1095
1096 id = cldrv->id_table;
1097 while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
1098 if (!uuid_le_cmp(*uuid, id->uuid)) {
1099 match = true;
1100
1101 if (cldev->name[0])
1102 if (strncmp(cldev->name, id->name,
1103 sizeof(id->name)))
1104 match = false;
1105
1106 if (id->version != MEI_CL_VERSION_ANY)
1107 if (id->version != version)
1108 match = false;
1109 if (match)
1110 return id;
1111 }
1112
1113 id++;
1114 }
1115
1116 return NULL;
1117}
1118
1119/**
1120 * mei_cl_device_match - device match function
1121 *
1122 * @dev: device
1123 * @drv: driver
1124 *
1125 * Return: 1 if a matching device was found, 0 otherwise
1126 */
1127static int mei_cl_device_match(struct device *dev, const struct device_driver *drv)
1128{
1129 const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1130 const struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
1131 const struct mei_cl_device_id *found_id;
1132
1133 if (!cldev->do_match)
1134 return 0;
1135
1136 if (!cldrv || !cldrv->id_table)
1137 return 0;
1138
1139 found_id = mei_cl_device_find(cldev, cldrv);
1140 if (found_id)
1141 return 1;
1142
1143 return 0;
1144}
1145
1146/**
1147 * mei_cl_device_probe - bus probe function
1148 *
1149 * @dev: device
1150 *
1151 * Return: 0 on success; < 0 otherwise
1152 */
1153static int mei_cl_device_probe(struct device *dev)
1154{
1155 struct mei_cl_device *cldev;
1156 struct mei_cl_driver *cldrv;
1157 const struct mei_cl_device_id *id;
1158 int ret;
1159
1160 cldev = to_mei_cl_device(dev);
1161 cldrv = to_mei_cl_driver(dev->driver);
1162
1163 if (!cldrv || !cldrv->probe)
1164 return -ENODEV;
1165
1166 id = mei_cl_device_find(cldev, cldrv);
1167 if (!id)
1168 return -ENODEV;
1169
1170 if (!mei_cl_bus_module_get(cldev)) {
1171 dev_err(&cldev->dev, "get hw module failed");
1172 return -ENODEV;
1173 }
1174
1175 ret = cldrv->probe(cldev, id);
1176 if (ret) {
1177 mei_cl_bus_module_put(cldev);
1178 return ret;
1179 }
1180
1181 __module_get(THIS_MODULE);
1182 return 0;
1183}
1184
1185/**
1186 * mei_cl_device_remove - remove device from the bus
1187 *
1188 * @dev: device
1191 */
1192static void mei_cl_device_remove(struct device *dev)
1193{
1194 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1195 struct mei_cl_driver *cldrv = to_mei_cl_driver(dev->driver);
1196
1197 if (cldrv->remove)
1198 cldrv->remove(cldev);
1199
1200 mei_cldev_unregister_callbacks(cldev);
1201
1202 mei_cl_bus_module_put(cldev);
1203 module_put(THIS_MODULE);
1204}
1205
1206static ssize_t name_show(struct device *dev, struct device_attribute *a,
1207 char *buf)
1208{
1209 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1210
1211 return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
1212}
1213static DEVICE_ATTR_RO(name);
1214
1215static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
1216 char *buf)
1217{
1218 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1219 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1220
1221 return sprintf(buf, "%pUl", uuid);
1222}
1223static DEVICE_ATTR_RO(uuid);
1224
1225static ssize_t version_show(struct device *dev, struct device_attribute *a,
1226 char *buf)
1227{
1228 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1229 u8 version = mei_me_cl_ver(cldev->me_cl);
1230
1231 return sprintf(buf, "%02X", version);
1232}
1233static DEVICE_ATTR_RO(version);
1234
1235static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
1236 char *buf)
1237{
1238 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1239 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1240 u8 version = mei_me_cl_ver(cldev->me_cl);
1241
1242 return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
1243 cldev->name, uuid, version);
1244}
1245static DEVICE_ATTR_RO(modalias);
1246
1247static ssize_t max_conn_show(struct device *dev, struct device_attribute *a,
1248 char *buf)
1249{
1250 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1251 u8 maxconn = mei_me_cl_max_conn(cldev->me_cl);
1252
1253 return sprintf(buf, "%d", maxconn);
1254}
1255static DEVICE_ATTR_RO(max_conn);
1256
1257static ssize_t fixed_show(struct device *dev, struct device_attribute *a,
1258 char *buf)
1259{
1260 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1261 u8 fixed = mei_me_cl_fixed(cldev->me_cl);
1262
1263 return sprintf(buf, "%d", fixed);
1264}
1265static DEVICE_ATTR_RO(fixed);
1266
1267static ssize_t vtag_show(struct device *dev, struct device_attribute *a,
1268 char *buf)
1269{
1270 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1271 bool vt = mei_me_cl_vt(cldev->me_cl);
1272
1273 return sprintf(buf, "%d", vt);
1274}
1275static DEVICE_ATTR_RO(vtag);
1276
1277static ssize_t max_len_show(struct device *dev, struct device_attribute *a,
1278 char *buf)
1279{
1280 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1281 u32 maxlen = mei_me_cl_max_len(cldev->me_cl);
1282
1283 return sprintf(buf, "%u", maxlen);
1284}
1285static DEVICE_ATTR_RO(max_len);
1286
1287static struct attribute *mei_cldev_attrs[] = {
1288 &dev_attr_name.attr,
1289 &dev_attr_uuid.attr,
1290 &dev_attr_version.attr,
1291 &dev_attr_modalias.attr,
1292 &dev_attr_max_conn.attr,
1293 &dev_attr_fixed.attr,
1294 &dev_attr_vtag.attr,
1295 &dev_attr_max_len.attr,
1296 NULL,
1297};
1298ATTRIBUTE_GROUPS(mei_cldev);
1299
1300/**
1301 * mei_cl_device_uevent - me client bus uevent handler
1302 *
1303 * @dev: device
1304 * @env: uevent kobject
1305 *
1306 * Return: 0 on success, -ENOMEM when add_uevent_var fails
1307 */
1308static int mei_cl_device_uevent(const struct device *dev, struct kobj_uevent_env *env)
1309{
1310 const struct mei_cl_device *cldev = to_mei_cl_device(dev);
1311 const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
1312 u8 version = mei_me_cl_ver(cldev->me_cl);
1313
1314 if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
1315 return -ENOMEM;
1316
1317 if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
1318 return -ENOMEM;
1319
1320 if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
1321 return -ENOMEM;
1322
1323 if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
1324 cldev->name, uuid, version))
1325 return -ENOMEM;
1326
1327 return 0;
1328}
1329
1330static const struct bus_type mei_cl_bus_type = {
1331 .name = "mei",
1332 .dev_groups = mei_cldev_groups,
1333 .match = mei_cl_device_match,
1334 .probe = mei_cl_device_probe,
1335 .remove = mei_cl_device_remove,
1336 .uevent = mei_cl_device_uevent,
1337};
1338
1339static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
1340{
1341 if (bus)
1342 get_device(bus->dev);
1343
1344 return bus;
1345}
1346
1347static void mei_dev_bus_put(struct mei_device *bus)
1348{
1349 if (bus)
1350 put_device(bus->dev);
1351}
1352
1353static void mei_cl_bus_dev_release(struct device *dev)
1354{
1355 struct mei_cl_device *cldev = to_mei_cl_device(dev);
1356
1357 mei_cl_flush_queues(cldev->cl, NULL);
1358 mei_me_cl_put(cldev->me_cl);
1359 mei_dev_bus_put(cldev->bus);
1360 kfree(cldev->cl);
1361 kfree(cldev);
1362}
1363
1364static const struct device_type mei_cl_device_type = {
1365 .release = mei_cl_bus_dev_release,
1366};
1367
1368/**
1369 * mei_cl_bus_set_name - set device name for me client device
1370 * <controller>-<client device>
1371 * Example: 0000:00:16.0-55213584-9a29-4916-badf-0fb7ed682aeb
1372 *
1373 * @cldev: me client device
1374 */
1375static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
1376{
1377 dev_set_name(&cldev->dev, "%s-%pUl",
1378 dev_name(cldev->bus->dev),
1379 mei_me_cl_uuid(cldev->me_cl));
1380}
1381
1382/**
1383 * mei_cl_bus_dev_alloc - initialize and allocate mei client device
1384 *
1385 * @bus: mei device
1386 * @me_cl: me client
1387 *
1388 * Return: allocated device structure or NULL on allocation failure
1389 */
1390static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
1391 struct mei_me_client *me_cl)
1392{
1393 struct mei_cl_device *cldev;
1394 struct mei_cl *cl;
1395
1396 cldev = kzalloc(sizeof(*cldev), GFP_KERNEL);
1397 if (!cldev)
1398 return NULL;
1399
1400 cl = mei_cl_allocate(bus);
1401 if (!cl) {
1402 kfree(cldev);
1403 return NULL;
1404 }
1405
1406 device_initialize(&cldev->dev);
1407 cldev->dev.parent = bus->dev;
1408 cldev->dev.bus = &mei_cl_bus_type;
1409 cldev->dev.type = &mei_cl_device_type;
1410 cldev->bus = mei_dev_bus_get(bus);
1411 cldev->me_cl = mei_me_cl_get(me_cl);
1412 cldev->cl = cl;
1413 mei_cl_bus_set_name(cldev);
1414 cldev->is_added = 0;
1415 INIT_LIST_HEAD(&cldev->bus_list);
1416 device_enable_async_suspend(&cldev->dev);
1417
1418 return cldev;
1419}
1420
1421/**
1422 * mei_cl_bus_dev_setup - setup me client device
1423 * run fix up routines and set the device name
1424 *
1425 * @bus: mei device
1426 * @cldev: me client device
1427 *
1428 * Return: true if the device is eligible for enumeration
1429 */
1430static bool mei_cl_bus_dev_setup(struct mei_device *bus,
1431 struct mei_cl_device *cldev)
1432{
1433 cldev->do_match = 1;
1434 mei_cl_bus_dev_fixup(cldev);
1435
1436 /* the device name can change during fix up */
1437 if (cldev->do_match)
1438 mei_cl_bus_set_name(cldev);
1439
1440 return cldev->do_match == 1;
1441}
1442
1443/**
1444 * mei_cl_bus_dev_add - add me client device
1445 *
1446 * @cldev: me client device
1447 *
1448 * Return: 0 on success; < 0 on failure
1449 */
1450static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
1451{
1452 int ret;
1453
1454 dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
1455 mei_me_cl_uuid(cldev->me_cl),
1456 mei_me_cl_ver(cldev->me_cl));
1457 ret = device_add(&cldev->dev);
1458 if (!ret)
1459 cldev->is_added = 1;
1460
1461 return ret;
1462}
1463
1464/**
1465 * mei_cl_bus_dev_stop - stop the driver
1466 *
1467 * @cldev: me client device
1468 */
1469static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
1470{
1471 cldev->do_match = 0;
1472 if (cldev->is_added)
1473 device_release_driver(&cldev->dev);
1474}
1475
1476/**
1477 * mei_cl_bus_dev_destroy - destroy me client device object
1478 *
1479 * @cldev: me client device
1480 *
1481 * Locking: called under "dev->cl_bus_lock" lock
1482 */
1483static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
1484{
1485
1486 WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));
1487
1488 if (!cldev->is_added)
1489 return;
1490
1491 device_del(&cldev->dev);
1492
1493 list_del_init(&cldev->bus_list);
1494
1495 cldev->is_added = 0;
1496 put_device(&cldev->dev);
1497}
1498
1499/**
1500 * mei_cl_bus_remove_device - remove a device from the bus
1501 *
1502 * @cldev: me client device
1503 */
1504static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
1505{
1506 mei_cl_bus_dev_stop(cldev);
1507 mei_cl_bus_dev_destroy(cldev);
1508}
1509
1510/**
1511 * mei_cl_bus_remove_devices - remove all devices from the bus
1512 *
1513 * @bus: mei device
1514 */
1515void mei_cl_bus_remove_devices(struct mei_device *bus)
1516{
1517 struct mei_cl_device *cldev, *next;
1518
1519 mutex_lock(&bus->cl_bus_lock);
1520 list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
1521 mei_cl_bus_remove_device(cldev);
1522 mutex_unlock(&bus->cl_bus_lock);
1523}
1524
1525
1526/**
1527 * mei_cl_bus_dev_init - allocate and initialize a mei client device
1528 * based on the me client
1529 *
1530 * @bus: mei device
1531 * @me_cl: me client
1532 *
1533 * Locking: called under "dev->cl_bus_lock" lock
1534 */
1535static void mei_cl_bus_dev_init(struct mei_device *bus,
1536 struct mei_me_client *me_cl)
1537{
1538 struct mei_cl_device *cldev;
1539
1540 WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
1541
1542 dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
1543
1544 if (me_cl->bus_added)
1545 return;
1546
1547 cldev = mei_cl_bus_dev_alloc(bus, me_cl);
1548 if (!cldev)
1549 return;
1550
1551 me_cl->bus_added = true;
1552 list_add_tail(&cldev->bus_list, &bus->device_list);
1553
1554}
1555
1556/**
1557 * mei_cl_bus_rescan - scan the me clients list and create
1558 * devices for eligible clients
1559 *
1560 * @bus: mei device
1561 */
1562static void mei_cl_bus_rescan(struct mei_device *bus)
1563{
1564 struct mei_cl_device *cldev, *n;
1565 struct mei_me_client *me_cl;
1566
1567 mutex_lock(&bus->cl_bus_lock);
1568
1569 down_read(&bus->me_clients_rwsem);
1570 list_for_each_entry(me_cl, &bus->me_clients, list)
1571 mei_cl_bus_dev_init(bus, me_cl);
1572 up_read(&bus->me_clients_rwsem);
1573
1574 list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
1575
1576 if (!mei_me_cl_is_active(cldev->me_cl)) {
1577 mei_cl_bus_remove_device(cldev);
1578 continue;
1579 }
1580
1581 if (cldev->is_added)
1582 continue;
1583
1584 if (mei_cl_bus_dev_setup(bus, cldev))
1585 mei_cl_bus_dev_add(cldev);
1586 else {
1587 list_del_init(&cldev->bus_list);
1588 put_device(&cldev->dev);
1589 }
1590 }
1591 mutex_unlock(&bus->cl_bus_lock);
1592
1593 dev_dbg(bus->dev, "rescan end");
1594}
1595
1596void mei_cl_bus_rescan_work(struct work_struct *work)
1597{
1598 struct mei_device *bus =
1599 container_of(work, struct mei_device, bus_rescan_work);
1600
1601 mei_cl_bus_rescan(bus);
1602}
1603
1604int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
1605 struct module *owner)
1606{
1607 int err;
1608
1609 cldrv->driver.name = cldrv->name;
1610 cldrv->driver.owner = owner;
1611 cldrv->driver.bus = &mei_cl_bus_type;
1612
1613 err = driver_register(&cldrv->driver);
1614 if (err)
1615 return err;
1616
1617 pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);
1618
1619 return 0;
1620}
1621EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);
1622
1623void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
1624{
1625 driver_unregister(&cldrv->driver);
1626
1627 pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
1628}
1629EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
1630
1631
1632int __init mei_cl_bus_init(void)
1633{
1634 return bus_register(&mei_cl_bus_type);
1635}
1636
1637void __exit mei_cl_bus_exit(void)
1638{
1639 bus_unregister(&mei_cl_bus_type);
1640}
1/*
2 * Intel Management Engine Interface (Intel MEI) Linux driver
3 * Copyright (c) 2012-2013, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 */
15
16#include <linux/module.h>
17#include <linux/device.h>
18#include <linux/kernel.h>
19#include <linux/sched/signal.h>
20#include <linux/init.h>
21#include <linux/errno.h>
22#include <linux/slab.h>
23#include <linux/mutex.h>
24#include <linux/interrupt.h>
25#include <linux/mei_cl_bus.h>
26
27#include "mei_dev.h"
28#include "client.h"
29
30#define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver)
31#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev)
32
33/**
34 * __mei_cl_send - internal client send (write)
35 *
36 * @cl: host client
37 * @buf: buffer to send
38 * @length: buffer length
39 * @mode: sending mode
40 *
41 * Return: written size bytes or < 0 on error
42 */
43ssize_t __mei_cl_send(struct mei_cl *cl, u8 *buf, size_t length,
44 unsigned int mode)
45{
46 struct mei_device *bus;
47 struct mei_cl_cb *cb;
48 ssize_t rets;
49
50 if (WARN_ON(!cl || !cl->dev))
51 return -ENODEV;
52
53 bus = cl->dev;
54
55 mutex_lock(&bus->device_lock);
56 if (bus->dev_state != MEI_DEV_ENABLED) {
57 rets = -ENODEV;
58 goto out;
59 }
60
61 if (!mei_cl_is_connected(cl)) {
62 rets = -ENODEV;
63 goto out;
64 }
65
66 /* Check if we have an ME client device */
67 if (!mei_me_cl_is_active(cl->me_cl)) {
68 rets = -ENOTTY;
69 goto out;
70 }
71
72 if (length > mei_cl_mtu(cl)) {
73 rets = -EFBIG;
74 goto out;
75 }
76
77 while (cl->tx_cb_queued >= bus->tx_queue_limit) {
78 mutex_unlock(&bus->device_lock);
79 rets = wait_event_interruptible(cl->tx_wait,
80 cl->writing_state == MEI_WRITE_COMPLETE ||
81 (!mei_cl_is_connected(cl)));
82 mutex_lock(&bus->device_lock);
83 if (rets) {
84 if (signal_pending(current))
85 rets = -EINTR;
86 goto out;
87 }
88 if (!mei_cl_is_connected(cl)) {
89 rets = -ENODEV;
90 goto out;
91 }
92 }
93
94 cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
95 if (!cb) {
96 rets = -ENOMEM;
97 goto out;
98 }
99
100 cb->internal = !!(mode & MEI_CL_IO_TX_INTERNAL);
101 cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
102 memcpy(cb->buf.data, buf, length);
103
104 rets = mei_cl_write(cl, cb);
105
106out:
107 mutex_unlock(&bus->device_lock);
108
109 return rets;
110}
111
112/**
113 * __mei_cl_recv - internal client receive (read)
114 *
115 * @cl: host client
116 * @buf: buffer to receive
117 * @length: buffer length
118 * @mode: io mode
119 *
120 * Return: read size in bytes of < 0 on error
121 */
122ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length,
123 unsigned int mode)
124{
125 struct mei_device *bus;
126 struct mei_cl_cb *cb;
127 size_t r_length;
128 ssize_t rets;
129 bool nonblock = !!(mode & MEI_CL_IO_RX_NONBLOCK);
130
131 if (WARN_ON(!cl || !cl->dev))
132 return -ENODEV;
133
134 bus = cl->dev;
135
136 mutex_lock(&bus->device_lock);
137 if (bus->dev_state != MEI_DEV_ENABLED) {
138 rets = -ENODEV;
139 goto out;
140 }
141
142 cb = mei_cl_read_cb(cl, NULL);
143 if (cb)
144 goto copy;
145
146 rets = mei_cl_read_start(cl, length, NULL);
147 if (rets && rets != -EBUSY)
148 goto out;
149
150 if (nonblock) {
151 rets = -EAGAIN;
152 goto out;
153 }
154
155 /* wait on event only if there is no other waiter */
156 /* synchronized under device mutex */
157 if (!waitqueue_active(&cl->rx_wait)) {
158
159 mutex_unlock(&bus->device_lock);
160
161 if (wait_event_interruptible(cl->rx_wait,
162 (!list_empty(&cl->rd_completed)) ||
163 (!mei_cl_is_connected(cl)))) {
164
165 if (signal_pending(current))
166 return -EINTR;
167 return -ERESTARTSYS;
168 }
169
170 mutex_lock(&bus->device_lock);
171
172 if (!mei_cl_is_connected(cl)) {
173 rets = -ENODEV;
174 goto out;
175 }
176 }
177
178 cb = mei_cl_read_cb(cl, NULL);
179 if (!cb) {
180 rets = 0;
181 goto out;
182 }
183
184copy:
185 if (cb->status) {
186 rets = cb->status;
187 goto free;
188 }
189
190 r_length = min_t(size_t, length, cb->buf_idx);
191 memcpy(buf, cb->buf.data, r_length);
192 rets = r_length;
193
194free:
195 mei_io_cb_free(cb);
196out:
197 mutex_unlock(&bus->device_lock);
198
199 return rets;
200}
201
202/**
203 * mei_cldev_send - me device send (write)
204 *
205 * @cldev: me client device
206 * @buf: buffer to send
207 * @length: buffer length
208 *
209 * Return: written size in bytes or < 0 on error
210 */
211ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length)
212{
213 struct mei_cl *cl = cldev->cl;
214
215 return __mei_cl_send(cl, buf, length, MEI_CL_IO_TX_BLOCKING);
216}
217EXPORT_SYMBOL_GPL(mei_cldev_send);
218
219/**
220 * mei_cldev_recv_nonblock - non block client receive (read)
221 *
222 * @cldev: me client device
223 * @buf: buffer to receive
224 * @length: buffer length
225 *
226 * Return: read size in bytes of < 0 on error
227 * -EAGAIN if function will block.
228 */
229ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
230 size_t length)
231{
232 struct mei_cl *cl = cldev->cl;
233
234 return __mei_cl_recv(cl, buf, length, MEI_CL_IO_RX_NONBLOCK);
235}
236EXPORT_SYMBOL_GPL(mei_cldev_recv_nonblock);
237
238/**
239 * mei_cldev_recv - client receive (read)
240 *
241 * @cldev: me client device
242 * @buf: buffer to receive
243 * @length: buffer length
244 *
245 * Return: read size in bytes of < 0 on error
246 */
247ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length)
248{
249 struct mei_cl *cl = cldev->cl;
250
251 return __mei_cl_recv(cl, buf, length, 0);
252}
253EXPORT_SYMBOL_GPL(mei_cldev_recv);
254
255/**
256 * mei_cl_bus_rx_work - dispatch rx event for a bus device
257 *
258 * @work: work
259 */
260static void mei_cl_bus_rx_work(struct work_struct *work)
261{
262 struct mei_cl_device *cldev;
263 struct mei_device *bus;
264
265 cldev = container_of(work, struct mei_cl_device, rx_work);
266
267 bus = cldev->bus;
268
269 if (cldev->rx_cb)
270 cldev->rx_cb(cldev);
271
272 mutex_lock(&bus->device_lock);
273 mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
274 mutex_unlock(&bus->device_lock);
275}
276
277/**
278 * mei_cl_bus_notif_work - dispatch FW notif event for a bus device
279 *
280 * @work: work
281 */
282static void mei_cl_bus_notif_work(struct work_struct *work)
283{
284 struct mei_cl_device *cldev;
285
286 cldev = container_of(work, struct mei_cl_device, notif_work);
287
288 if (cldev->notif_cb)
289 cldev->notif_cb(cldev);
290}
291
292/**
293 * mei_cl_bus_notify_event - schedule notify cb on bus client
294 *
295 * @cl: host client
296 *
297 * Return: true if event was scheduled
298 * false if the client is not waiting for event
299 */
300bool mei_cl_bus_notify_event(struct mei_cl *cl)
301{
302 struct mei_cl_device *cldev = cl->cldev;
303
304 if (!cldev || !cldev->notif_cb)
305 return false;
306
307 if (!cl->notify_ev)
308 return false;
309
310 schedule_work(&cldev->notif_work);
311
312 cl->notify_ev = false;
313
314 return true;
315}
316
317/**
318 * mei_cl_bus_rx_event - schedule rx event
319 *
320 * @cl: host client
321 *
322 * Return: true if event was scheduled
323 * false if the client is not waiting for event
324 */
325bool mei_cl_bus_rx_event(struct mei_cl *cl)
326{
327 struct mei_cl_device *cldev = cl->cldev;
328
329 if (!cldev || !cldev->rx_cb)
330 return false;
331
332 schedule_work(&cldev->rx_work);
333
334 return true;
335}
336
337/**
338 * mei_cldev_register_rx_cb - register Rx event callback
339 *
340 * @cldev: me client devices
341 * @rx_cb: callback function
342 *
343 * Return: 0 on success
344 * -EALREADY if an callback is already registered
345 * <0 on other errors
346 */
347int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb)
348{
349 struct mei_device *bus = cldev->bus;
350 int ret;
351
352 if (!rx_cb)
353 return -EINVAL;
354 if (cldev->rx_cb)
355 return -EALREADY;
356
357 cldev->rx_cb = rx_cb;
358 INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work);
359
360 mutex_lock(&bus->device_lock);
361 ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL);
362 mutex_unlock(&bus->device_lock);
363 if (ret && ret != -EBUSY)
364 return ret;
365
366 return 0;
367}
368EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb);
369
370/**
371 * mei_cldev_register_notif_cb - register FW notification event callback
372 *
373 * @cldev: me client devices
374 * @notif_cb: callback function
375 *
376 * Return: 0 on success
377 * -EALREADY if an callback is already registered
378 * <0 on other errors
379 */
380int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
381 mei_cldev_cb_t notif_cb)
382{
383 struct mei_device *bus = cldev->bus;
384 int ret;
385
386 if (!notif_cb)
387 return -EINVAL;
388
389 if (cldev->notif_cb)
390 return -EALREADY;
391
392 cldev->notif_cb = notif_cb;
393 INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work);
394
395 mutex_lock(&bus->device_lock);
396 ret = mei_cl_notify_request(cldev->cl, NULL, 1);
397 mutex_unlock(&bus->device_lock);
398 if (ret)
399 return ret;
400
401 return 0;
402}
403EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb);
404
405/**
406 * mei_cldev_get_drvdata - driver data getter
407 *
408 * @cldev: mei client device
409 *
410 * Return: driver private data
411 */
412void *mei_cldev_get_drvdata(const struct mei_cl_device *cldev)
413{
414 return dev_get_drvdata(&cldev->dev);
415}
416EXPORT_SYMBOL_GPL(mei_cldev_get_drvdata);
417
418/**
419 * mei_cldev_set_drvdata - driver data setter
420 *
421 * @cldev: mei client device
422 * @data: data to store
423 */
424void mei_cldev_set_drvdata(struct mei_cl_device *cldev, void *data)
425{
426 dev_set_drvdata(&cldev->dev, data);
427}
428EXPORT_SYMBOL_GPL(mei_cldev_set_drvdata);
429
430/**
431 * mei_cldev_uuid - return uuid of the underlying me client
432 *
433 * @cldev: mei client device
434 *
435 * Return: me client uuid
436 */
437const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev)
438{
439 return mei_me_cl_uuid(cldev->me_cl);
440}
441EXPORT_SYMBOL_GPL(mei_cldev_uuid);
442
443/**
444 * mei_cldev_ver - return protocol version of the underlying me client
445 *
446 * @cldev: mei client device
447 *
448 * Return: me client protocol version
449 */
450u8 mei_cldev_ver(const struct mei_cl_device *cldev)
451{
452 return mei_me_cl_ver(cldev->me_cl);
453}
454EXPORT_SYMBOL_GPL(mei_cldev_ver);
455
456/**
457 * mei_cldev_enabled - check whether the device is enabled
458 *
459 * @cldev: mei client device
460 *
461 * Return: true if me client is initialized and connected
462 */
463bool mei_cldev_enabled(struct mei_cl_device *cldev)
464{
465 return mei_cl_is_connected(cldev->cl);
466}
467EXPORT_SYMBOL_GPL(mei_cldev_enabled);
468
469/**
470 * mei_cl_bus_module_get - acquire module of the underlying
471 * hw driver.
472 *
473 * @cldev: mei client device
474 *
475 * Return: true on success; false if the module was removed.
476 */
477static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
478{
479 return try_module_get(cldev->bus->dev->driver->owner);
480}
481
482/**
483 * mei_cl_bus_module_put - release the underlying hw module.
484 *
485 * @cldev: mei client device
486 */
487static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
488{
489 module_put(cldev->bus->dev->driver->owner);
490}
491
492/**
493 * mei_cldev_enable - enable me client device
494 * create connection with me client
495 *
496 * @cldev: me client device
497 *
498 * Return: 0 on success and < 0 on error
499 */
500int mei_cldev_enable(struct mei_cl_device *cldev)
501{
502 struct mei_device *bus = cldev->bus;
503 struct mei_cl *cl;
504 int ret;
505
506 cl = cldev->cl;
507
508 if (cl->state == MEI_FILE_UNINITIALIZED) {
509 mutex_lock(&bus->device_lock);
510 ret = mei_cl_link(cl);
511 mutex_unlock(&bus->device_lock);
512 if (ret)
513 return ret;
514 /* update pointers */
515 cl->cldev = cldev;
516 }
517
518 mutex_lock(&bus->device_lock);
519 if (mei_cl_is_connected(cl)) {
520 ret = 0;
521 goto out;
522 }
523
524 if (!mei_me_cl_is_active(cldev->me_cl)) {
525 dev_err(&cldev->dev, "me client is not active\n");
526 ret = -ENOTTY;
527 goto out;
528 }
529
530 if (!mei_cl_bus_module_get(cldev)) {
531 dev_err(&cldev->dev, "get hw module failed");
532 ret = -ENODEV;
533 goto out;
534 }
535
536 ret = mei_cl_connect(cl, cldev->me_cl, NULL);
537 if (ret < 0) {
538 dev_err(&cldev->dev, "cannot connect\n");
539 mei_cl_bus_module_put(cldev);
540 }
541
542out:
543 mutex_unlock(&bus->device_lock);
544
545 return ret;
546}
547EXPORT_SYMBOL_GPL(mei_cldev_enable);
548
549/**
550 * mei_cldev_unregister_callbacks - internal wrapper for unregistering
551 * callbacks.
552 *
553 * @cldev: client device
554 */
555static void mei_cldev_unregister_callbacks(struct mei_cl_device *cldev)
556{
557 if (cldev->rx_cb) {
558 cancel_work_sync(&cldev->rx_work);
559 cldev->rx_cb = NULL;
560 }
561
562 if (cldev->notif_cb) {
563 cancel_work_sync(&cldev->notif_work);
564 cldev->notif_cb = NULL;
565 }
566}
567
568/**
569 * mei_cldev_disable - disable me client device
570 * disconnect form the me client
571 *
572 * @cldev: me client device
573 *
574 * Return: 0 on success and < 0 on error
575 */
576int mei_cldev_disable(struct mei_cl_device *cldev)
577{
578 struct mei_device *bus;
579 struct mei_cl *cl;
580 int err;
581
582 if (!cldev)
583 return -ENODEV;
584
585 cl = cldev->cl;
586
587 bus = cldev->bus;
588
589 mei_cldev_unregister_callbacks(cldev);
590
591 mutex_lock(&bus->device_lock);
592
593 if (!mei_cl_is_connected(cl)) {
594 dev_dbg(bus->dev, "Already disconnected\n");
595 err = 0;
596 goto out;
597 }
598
599 err = mei_cl_disconnect(cl);
600 if (err < 0)
601 dev_err(bus->dev, "Could not disconnect from the ME client\n");
602
603out:
604 mei_cl_bus_module_put(cldev);
605
606 /* Flush queues and remove any pending read */
607 mei_cl_flush_queues(cl, NULL);
608 mei_cl_unlink(cl);
609
610 mutex_unlock(&bus->device_lock);
611 return err;
612}
613EXPORT_SYMBOL_GPL(mei_cldev_disable);
614
615/**
616 * mei_cl_device_find - find matching entry in the driver id table
617 *
618 * @cldev: me client device
619 * @cldrv: me client driver
620 *
621 * Return: id on success; NULL if no id is matching
622 */
623static const
624struct mei_cl_device_id *mei_cl_device_find(struct mei_cl_device *cldev,
625 struct mei_cl_driver *cldrv)
626{
627 const struct mei_cl_device_id *id;
628 const uuid_le *uuid;
629 u8 version;
630 bool match;
631
632 uuid = mei_me_cl_uuid(cldev->me_cl);
633 version = mei_me_cl_ver(cldev->me_cl);
634
635 id = cldrv->id_table;
636 while (uuid_le_cmp(NULL_UUID_LE, id->uuid)) {
637 if (!uuid_le_cmp(*uuid, id->uuid)) {
638 match = true;
639
640 if (cldev->name[0])
641 if (strncmp(cldev->name, id->name,
642 sizeof(id->name)))
643 match = false;
644
645 if (id->version != MEI_CL_VERSION_ANY)
646 if (id->version != version)
647 match = false;
648 if (match)
649 return id;
650 }
651
652 id++;
653 }
654
655 return NULL;
656}

/**
 * mei_cl_device_match - device match function
 *
 * @dev: device
 * @drv: driver
 *
 * Return: 1 if a matching device was found, 0 otherwise
 */
static int mei_cl_device_match(struct device *dev, struct device_driver *drv)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv = to_mei_cl_driver(drv);
	const struct mei_cl_device_id *found_id;

	if (!cldev)
		return 0;

	if (!cldev->do_match)
		return 0;

	if (!cldrv || !cldrv->id_table)
		return 0;

	found_id = mei_cl_device_find(cldev, cldrv);
	if (found_id)
		return 1;

	return 0;
}

/**
 * mei_cl_device_probe - bus probe function
 *
 * @dev: device
 *
 * Return: 0 on success; < 0 otherwise
 */
static int mei_cl_device_probe(struct device *dev)
{
	struct mei_cl_device *cldev;
	struct mei_cl_driver *cldrv;
	const struct mei_cl_device_id *id;
	int ret;

	cldev = to_mei_cl_device(dev);
	cldrv = to_mei_cl_driver(dev->driver);

	if (!cldev)
		return 0;

	if (!cldrv || !cldrv->probe)
		return -ENODEV;

	id = mei_cl_device_find(cldev, cldrv);
	if (!id)
		return -ENODEV;

	ret = cldrv->probe(cldev, id);
	if (ret)
		return ret;

	__module_get(THIS_MODULE);
	return 0;
}

/**
 * mei_cl_device_remove - remove device from the bus
 *
 * @dev: device
 *
 * Return: 0 on success; < 0 otherwise
 */
static int mei_cl_device_remove(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	struct mei_cl_driver *cldrv;
	int ret = 0;

	if (!cldev || !dev->driver)
		return 0;

	cldrv = to_mei_cl_driver(dev->driver);
	if (cldrv->remove)
		ret = cldrv->remove(cldev);

	mei_cldev_unregister_callbacks(cldev);

	module_put(THIS_MODULE);
	dev->driver = NULL;
	return ret;
}

static ssize_t name_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	return scnprintf(buf, PAGE_SIZE, "%s", cldev->name);
}
static DEVICE_ATTR_RO(name);

static ssize_t uuid_show(struct device *dev, struct device_attribute *a,
			 char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "%pUl", uuid);
}
static DEVICE_ATTR_RO(uuid);

static ssize_t version_show(struct device *dev, struct device_attribute *a,
			    char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "%02X", version);
}
static DEVICE_ATTR_RO(version);

static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
			     char *buf)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	return scnprintf(buf, PAGE_SIZE, "mei:%s:%pUl:%02X:",
			 cldev->name, uuid, version);
}
static DEVICE_ATTR_RO(modalias);

static struct attribute *mei_cldev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_uuid.attr,
	&dev_attr_version.attr,
	&dev_attr_modalias.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mei_cldev);

/**
 * mei_cl_device_uevent - me client bus uevent handler
 *
 * @dev: device
 * @env: uevent kobject
 *
 * Return: 0 on success, -ENOMEM when add_uevent_var fails
 */
static int mei_cl_device_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);
	const uuid_le *uuid = mei_me_cl_uuid(cldev->me_cl);
	u8 version = mei_me_cl_ver(cldev->me_cl);

	if (add_uevent_var(env, "MEI_CL_VERSION=%d", version))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_UUID=%pUl", uuid))
		return -ENOMEM;

	if (add_uevent_var(env, "MEI_CL_NAME=%s", cldev->name))
		return -ENOMEM;

	if (add_uevent_var(env, "MODALIAS=mei:%s:%pUl:%02X:",
			   cldev->name, uuid, version))
		return -ENOMEM;

	return 0;
}
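
/*
 * For illustration: with the format used above, a hypothetical client named
 * "example" with UUID 12345678-1234-1234-1234-123412341234 and version 0x01
 * would produce
 *
 *   MODALIAS=mei:example:12345678-1234-1234-1234-123412341234:01:
 *
 * which udev/modprobe match against the aliases generated from each client
 * driver's MODULE_DEVICE_TABLE(mei, ...) entries to autoload the driver
 * module. The same string is exposed through the modalias sysfs attribute.
 */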

static struct bus_type mei_cl_bus_type = {
	.name = "mei",
	.dev_groups = mei_cldev_groups,
	.match = mei_cl_device_match,
	.probe = mei_cl_device_probe,
	.remove = mei_cl_device_remove,
	.uevent = mei_cl_device_uevent,
};

static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
	if (bus)
		get_device(bus->dev);

	return bus;
}

static void mei_dev_bus_put(struct mei_device *bus)
{
	if (bus)
		put_device(bus->dev);
}

static void mei_cl_bus_dev_release(struct device *dev)
{
	struct mei_cl_device *cldev = to_mei_cl_device(dev);

	if (!cldev)
		return;

	mei_me_cl_put(cldev->me_cl);
	mei_dev_bus_put(cldev->bus);
	kfree(cldev->cl);
	kfree(cldev);
}

static const struct device_type mei_cl_device_type = {
	.release = mei_cl_bus_dev_release,
};

/**
 * mei_cl_bus_set_name - set device name for me client device
 *
 * @cldev: me client device
 */
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
	dev_set_name(&cldev->dev, "mei:%s:%pUl:%02X",
		     cldev->name,
		     mei_me_cl_uuid(cldev->me_cl),
		     mei_me_cl_ver(cldev->me_cl));
}

/**
 * mei_cl_bus_dev_alloc - allocate and initialize a mei client device
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Return: allocated device structure or NULL on allocation failure
 */
static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
						  struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;
	struct mei_cl *cl;

	cldev = kzalloc(sizeof(struct mei_cl_device), GFP_KERNEL);
	if (!cldev)
		return NULL;

	cl = mei_cl_allocate(bus);
	if (!cl) {
		kfree(cldev);
		return NULL;
	}

	device_initialize(&cldev->dev);
	cldev->dev.parent = bus->dev;
	cldev->dev.bus = &mei_cl_bus_type;
	cldev->dev.type = &mei_cl_device_type;
	cldev->bus = mei_dev_bus_get(bus);
	cldev->me_cl = mei_me_cl_get(me_cl);
	cldev->cl = cl;
	mei_cl_bus_set_name(cldev);
	cldev->is_added = 0;
	INIT_LIST_HEAD(&cldev->bus_list);

	return cldev;
}

/**
 * mei_cl_bus_dev_setup - setup me client device
 *    run fix up routines and set the device name
 *
 * @bus: mei device
 * @cldev: me client device
 *
 * Return: true if the device is eligible for enumeration
 */
static bool mei_cl_bus_dev_setup(struct mei_device *bus,
				 struct mei_cl_device *cldev)
{
	cldev->do_match = 1;
	mei_cl_bus_dev_fixup(cldev);

	/* the device name can change during fix up */
	if (cldev->do_match)
		mei_cl_bus_set_name(cldev);

	return cldev->do_match == 1;
}

/**
 * mei_cl_bus_dev_add - add me client device
 *
 * @cldev: me client device
 *
 * Return: 0 on success; < 0 on failure
 */
static int mei_cl_bus_dev_add(struct mei_cl_device *cldev)
{
	int ret;

	dev_dbg(cldev->bus->dev, "adding %pUL:%02X\n",
		mei_me_cl_uuid(cldev->me_cl),
		mei_me_cl_ver(cldev->me_cl));
	ret = device_add(&cldev->dev);
	if (!ret)
		cldev->is_added = 1;

	return ret;
}

/**
 * mei_cl_bus_dev_stop - stop the driver
 *
 * @cldev: me client device
 */
static void mei_cl_bus_dev_stop(struct mei_cl_device *cldev)
{
	if (cldev->is_added)
		device_release_driver(&cldev->dev);
}

/**
 * mei_cl_bus_dev_destroy - destroy me client device object
 *
 * @cldev: me client device
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_destroy(struct mei_cl_device *cldev)
{
	WARN_ON(!mutex_is_locked(&cldev->bus->cl_bus_lock));

	if (!cldev->is_added)
		return;

	device_del(&cldev->dev);

	list_del_init(&cldev->bus_list);

	cldev->is_added = 0;
	put_device(&cldev->dev);
}

/**
 * mei_cl_bus_remove_device - remove a device from the bus
 *
 * @cldev: me client device
 */
static void mei_cl_bus_remove_device(struct mei_cl_device *cldev)
{
	mei_cl_bus_dev_stop(cldev);
	mei_cl_bus_dev_destroy(cldev);
}

/**
 * mei_cl_bus_remove_devices - remove all devices from the bus
 *
 * @bus: mei device
 */
void mei_cl_bus_remove_devices(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *next;

	mutex_lock(&bus->cl_bus_lock);
	list_for_each_entry_safe(cldev, next, &bus->device_list, bus_list)
		mei_cl_bus_remove_device(cldev);
	mutex_unlock(&bus->cl_bus_lock);
}

/**
 * mei_cl_bus_dev_init - allocate and initialize a mei client device
 *    based on the me client
 *
 * @bus: mei device
 * @me_cl: me client
 *
 * Locking: called under "dev->cl_bus_lock" lock
 */
static void mei_cl_bus_dev_init(struct mei_device *bus,
				struct mei_me_client *me_cl)
{
	struct mei_cl_device *cldev;

	WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));

	dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));

	if (me_cl->bus_added)
		return;

	cldev = mei_cl_bus_dev_alloc(bus, me_cl);
	if (!cldev)
		return;

	me_cl->bus_added = true;
	list_add_tail(&cldev->bus_list, &bus->device_list);
}

/**
 * mei_cl_bus_rescan - scan the me clients list and create devices
 *    for eligible clients
 *
 * @bus: mei device
 */
static void mei_cl_bus_rescan(struct mei_device *bus)
{
	struct mei_cl_device *cldev, *n;
	struct mei_me_client *me_cl;

	mutex_lock(&bus->cl_bus_lock);

	down_read(&bus->me_clients_rwsem);
	list_for_each_entry(me_cl, &bus->me_clients, list)
		mei_cl_bus_dev_init(bus, me_cl);
	up_read(&bus->me_clients_rwsem);

	list_for_each_entry_safe(cldev, n, &bus->device_list, bus_list) {
		if (!mei_me_cl_is_active(cldev->me_cl)) {
			mei_cl_bus_remove_device(cldev);
			continue;
		}

		if (cldev->is_added)
			continue;

		if (mei_cl_bus_dev_setup(bus, cldev)) {
			mei_cl_bus_dev_add(cldev);
		} else {
			list_del_init(&cldev->bus_list);
			put_device(&cldev->dev);
		}
	}
	mutex_unlock(&bus->cl_bus_lock);

	dev_dbg(bus->dev, "rescan end");
}

void mei_cl_bus_rescan_work(struct work_struct *work)
{
	struct mei_device *bus =
		container_of(work, struct mei_device, bus_rescan_work);

	mei_cl_bus_rescan(bus);
}

int __mei_cldev_driver_register(struct mei_cl_driver *cldrv,
				struct module *owner)
{
	int err;

	cldrv->driver.name = cldrv->name;
	cldrv->driver.owner = owner;
	cldrv->driver.bus = &mei_cl_bus_type;

	err = driver_register(&cldrv->driver);
	if (err)
		return err;

	pr_debug("mei: driver [%s] registered\n", cldrv->driver.name);

	return 0;
}
EXPORT_SYMBOL_GPL(__mei_cldev_driver_register);

void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv)
{
	driver_unregister(&cldrv->driver);

	pr_debug("mei: driver [%s] unregistered\n", cldrv->driver.name);
}
EXPORT_SYMBOL_GPL(mei_cldev_driver_unregister);
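
/*
 * Illustrative sketch (not part of this file): the registration boilerplate a
 * client driver built on this bus would use. The driver name and callbacks
 * are assumptions for the example, and the id table refers to the example
 * table sketched above; the int return of the remove() callback follows
 * mei_cl_device_remove() in this file. module_mei_cl_driver() expands to
 * module_init/module_exit wrappers around the register/unregister calls
 * exported above.
 */
static int example_cl_probe(struct mei_cl_device *cldev,
			    const struct mei_cl_device_id *id)
{
	/* establish the connection as soon as the device is bound */
	return mei_cldev_enable(cldev);
}

static int example_cl_remove(struct mei_cl_device *cldev)
{
	/* drop the connection when the driver is unbound */
	return mei_cldev_disable(cldev);
}

static struct mei_cl_driver example_cl_driver = {
	.id_table = example_cl_tbl,
	.name = "example_cl",
	.probe = example_cl_probe,
	.remove = example_cl_remove,
};
module_mei_cl_driver(example_cl_driver);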

int __init mei_cl_bus_init(void)
{
	return bus_register(&mei_cl_bus_type);
}

void __exit mei_cl_bus_exit(void)
{
	bus_unregister(&mei_cl_bus_type);
}