// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * The Serio abstraction module
 *
 * Copyright (c) 1999-2004 Vojtech Pavlik
 * Copyright (c) 2004 Dmitry Torokhov
 * Copyright (c) 2003 Daniele Bellucci
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/serio.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Serio abstraction core");
MODULE_LICENSE("GPL");

/*
 * serio_mutex protects the entire serio subsystem and is taken every time
 * a serio port or driver is registered or unregistered.
 */
static DEFINE_MUTEX(serio_mutex);

static LIST_HEAD(serio_list);

static void serio_add_port(struct serio *serio);
static int serio_reconnect_port(struct serio *serio);
static void serio_disconnect_port(struct serio *serio);
static void serio_reconnect_subtree(struct serio *serio);
static void serio_attach_driver(struct serio_driver *drv);

static int serio_connect_driver(struct serio *serio, struct serio_driver *drv)
{
        int retval;

        mutex_lock(&serio->drv_mutex);
        retval = drv->connect(serio, drv);
        mutex_unlock(&serio->drv_mutex);

        return retval;
}

static int serio_reconnect_driver(struct serio *serio)
{
        int retval = -1;

        mutex_lock(&serio->drv_mutex);
        if (serio->drv && serio->drv->reconnect)
                retval = serio->drv->reconnect(serio);
        mutex_unlock(&serio->drv_mutex);

        return retval;
}

static void serio_disconnect_driver(struct serio *serio)
{
        mutex_lock(&serio->drv_mutex);
        if (serio->drv)
                serio->drv->disconnect(serio);
        mutex_unlock(&serio->drv_mutex);
}

static int serio_match_port(const struct serio_device_id *ids, struct serio *serio)
{
        while (ids->type || ids->proto) {
                if ((ids->type == SERIO_ANY || ids->type == serio->id.type) &&
                    (ids->proto == SERIO_ANY || ids->proto == serio->id.proto) &&
                    (ids->extra == SERIO_ANY || ids->extra == serio->id.extra) &&
                    (ids->id == SERIO_ANY || ids->id == serio->id.id))
                        return 1;
                ids++;
        }
        return 0;
}
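
/*
 * Usage sketch (illustrative only, not part of the serio core): a client
 * driver's id_table as consumed by serio_match_port() above.  The names
 * below are hypothetical.  Note that the table must end with an all-zero
 * entry, since matching stops once both type and proto are zero.
 *
 *	static const struct serio_device_id example_serio_ids[] = {
 *		{
 *			.type	= SERIO_8042,		(i8042 KBD/AUX port)
 *			.proto	= SERIO_ANY,
 *			.id	= SERIO_ANY,
 *			.extra	= SERIO_ANY,
 *		},
 *		{ 0 }					(terminator)
 *	};
 */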

/*
 * Basic serio -> driver core mappings
 */

static int serio_bind_driver(struct serio *serio, struct serio_driver *drv)
{
        int error;

        if (serio_match_port(drv->id_table, serio)) {

                serio->dev.driver = &drv->driver;
                if (serio_connect_driver(serio, drv)) {
                        serio->dev.driver = NULL;
                        return -ENODEV;
                }

                error = device_bind_driver(&serio->dev);
                if (error) {
                        dev_warn(&serio->dev,
                                 "device_bind_driver() failed for %s (%s) and %s, error: %d\n",
                                 serio->phys, serio->name,
                                 drv->description, error);
                        serio_disconnect_driver(serio);
                        serio->dev.driver = NULL;
                        return error;
                }
        }
        return 0;
}

static void serio_find_driver(struct serio *serio)
{
        int error;

        error = device_attach(&serio->dev);
        if (error < 0 && error != -EPROBE_DEFER)
                dev_warn(&serio->dev,
                         "device_attach() failed for %s (%s), error: %d\n",
                         serio->phys, serio->name, error);
}


/*
 * Serio event processing.
 */

enum serio_event_type {
        SERIO_RESCAN_PORT,
        SERIO_RECONNECT_PORT,
        SERIO_RECONNECT_SUBTREE,
        SERIO_REGISTER_PORT,
        SERIO_ATTACH_DRIVER,
};

struct serio_event {
        enum serio_event_type type;
        void *object;
        struct module *owner;
        struct list_head node;
};

static DEFINE_SPINLOCK(serio_event_lock);	/* protects serio_event_list */
static LIST_HEAD(serio_event_list);

static struct serio_event *serio_get_event(void)
{
        struct serio_event *event = NULL;
        unsigned long flags;

        spin_lock_irqsave(&serio_event_lock, flags);

        if (!list_empty(&serio_event_list)) {
                event = list_first_entry(&serio_event_list,
                                         struct serio_event, node);
                list_del_init(&event->node);
        }

        spin_unlock_irqrestore(&serio_event_lock, flags);
        return event;
}

static void serio_free_event(struct serio_event *event)
{
        module_put(event->owner);
        kfree(event);
}

static void serio_remove_duplicate_events(void *object,
                                          enum serio_event_type type)
{
        struct serio_event *e, *next;
        unsigned long flags;

        spin_lock_irqsave(&serio_event_lock, flags);

        list_for_each_entry_safe(e, next, &serio_event_list, node) {
                if (object == e->object) {
                        /*
                         * If this event is of different type we should not
                         * look further - we only suppress duplicate events
                         * that were sent back-to-back.
                         */
                        if (type != e->type)
                                break;

                        list_del_init(&e->node);
                        serio_free_event(e);
                }
        }

        spin_unlock_irqrestore(&serio_event_lock, flags);
}

static void serio_handle_event(struct work_struct *work)
{
        struct serio_event *event;

        mutex_lock(&serio_mutex);

        while ((event = serio_get_event())) {

                switch (event->type) {

                case SERIO_REGISTER_PORT:
                        serio_add_port(event->object);
                        break;

                case SERIO_RECONNECT_PORT:
                        serio_reconnect_port(event->object);
                        break;

                case SERIO_RESCAN_PORT:
                        serio_disconnect_port(event->object);
                        serio_find_driver(event->object);
                        break;

                case SERIO_RECONNECT_SUBTREE:
                        serio_reconnect_subtree(event->object);
                        break;

                case SERIO_ATTACH_DRIVER:
                        serio_attach_driver(event->object);
                        break;
                }

                serio_remove_duplicate_events(event->object, event->type);
                serio_free_event(event);
        }

        mutex_unlock(&serio_mutex);
}

static DECLARE_WORK(serio_event_work, serio_handle_event);

static int serio_queue_event(void *object, struct module *owner,
                             enum serio_event_type event_type)
{
        unsigned long flags;
        struct serio_event *event;
        int retval = 0;

        spin_lock_irqsave(&serio_event_lock, flags);

        /*
         * Scan the event list for other events for the same serio port,
         * starting with the most recent one. If the event is the same we
         * do not need to add a new one. If the event is of a different
         * type we need to add this event and should not look further
         * because we need to preserve the sequence of distinct events.
         */
        list_for_each_entry_reverse(event, &serio_event_list, node) {
                if (event->object == object) {
                        if (event->type == event_type)
                                goto out;
                        break;
                }
        }

        event = kmalloc(sizeof(struct serio_event), GFP_ATOMIC);
        if (!event) {
                pr_err("Not enough memory to queue event %d\n", event_type);
                retval = -ENOMEM;
                goto out;
        }

        if (!try_module_get(owner)) {
                pr_warn("Can't get module reference, dropping event %d\n",
                        event_type);
                kfree(event);
                retval = -EINVAL;
                goto out;
        }

        event->type = event_type;
        event->object = object;
        event->owner = owner;

        list_add_tail(&event->node, &serio_event_list);
        queue_work(system_long_wq, &serio_event_work);

out:
        spin_unlock_irqrestore(&serio_event_lock, flags);
        return retval;
}

/*
 * Remove all events that have been submitted for a given
 * object, be it serio port or driver.
 */
static void serio_remove_pending_events(void *object)
{
        struct serio_event *event, *next;
        unsigned long flags;

        spin_lock_irqsave(&serio_event_lock, flags);

        list_for_each_entry_safe(event, next, &serio_event_list, node) {
                if (event->object == object) {
                        list_del_init(&event->node);
                        serio_free_event(event);
                }
        }

        spin_unlock_irqrestore(&serio_event_lock, flags);
}

/*
 * Locate child serio port (if any) that has not been fully registered yet.
 *
 * Children are registered by driver's connect() handler so there can't be a
 * grandchild pending registration together with a child.
 */
static struct serio *serio_get_pending_child(struct serio *parent)
{
        struct serio_event *event;
        struct serio *serio, *child = NULL;
        unsigned long flags;

        spin_lock_irqsave(&serio_event_lock, flags);

        list_for_each_entry(event, &serio_event_list, node) {
                if (event->type == SERIO_REGISTER_PORT) {
                        serio = event->object;
                        if (serio->parent == parent) {
                                child = serio;
                                break;
                        }
                }
        }

        spin_unlock_irqrestore(&serio_event_lock, flags);
        return child;
}

/*
 * Serio port operations
 */

static ssize_t serio_show_description(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);
        return sprintf(buf, "%s\n", serio->name);
}

static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);

        return sprintf(buf, "serio:ty%02Xpr%02Xid%02Xex%02X\n",
                       serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);
}

static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);
        return sprintf(buf, "%02x\n", serio->id.type);
}

static ssize_t proto_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);
        return sprintf(buf, "%02x\n", serio->id.proto);
}

static ssize_t id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);
        return sprintf(buf, "%02x\n", serio->id.id);
}

static ssize_t extra_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);
        return sprintf(buf, "%02x\n", serio->id.extra);
}

static ssize_t drvctl_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct serio *serio = to_serio_port(dev);
        struct device_driver *drv;
        int error;

        error = mutex_lock_interruptible(&serio_mutex);
        if (error)
                return error;

        if (!strncmp(buf, "none", count)) {
                serio_disconnect_port(serio);
        } else if (!strncmp(buf, "reconnect", count)) {
                serio_reconnect_subtree(serio);
        } else if (!strncmp(buf, "rescan", count)) {
                serio_disconnect_port(serio);
                serio_find_driver(serio);
                serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
        } else if ((drv = driver_find(buf, &serio_bus)) != NULL) {
                serio_disconnect_port(serio);
                error = serio_bind_driver(serio, to_serio_driver(drv));
                serio_remove_duplicate_events(serio, SERIO_RESCAN_PORT);
        } else {
                error = -EINVAL;
        }

        mutex_unlock(&serio_mutex);

        return error ? error : count;
}

static ssize_t serio_show_bind_mode(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);
        return sprintf(buf, "%s\n", serio->manual_bind ? "manual" : "auto");
}

static ssize_t serio_set_bind_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
        struct serio *serio = to_serio_port(dev);
        int retval;

        retval = count;
        if (!strncmp(buf, "manual", count)) {
                serio->manual_bind = true;
        } else if (!strncmp(buf, "auto", count)) {
                serio->manual_bind = false;
        } else {
                retval = -EINVAL;
        }

        return retval;
}

static ssize_t firmware_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct serio *serio = to_serio_port(dev);

        return sprintf(buf, "%s\n", serio->firmware_id);
}

static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);

static struct attribute *serio_device_id_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_proto.attr,
        &dev_attr_id.attr,
        &dev_attr_extra.attr,
        NULL
};

static const struct attribute_group serio_id_attr_group = {
        .name   = "id",
        .attrs  = serio_device_id_attrs,
};

static DEVICE_ATTR_RO(modalias);
static DEVICE_ATTR_WO(drvctl);
static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static DEVICE_ATTR_RO(firmware_id);

static struct attribute *serio_device_attrs[] = {
        &dev_attr_modalias.attr,
        &dev_attr_description.attr,
        &dev_attr_drvctl.attr,
        &dev_attr_bind_mode.attr,
        &dev_attr_firmware_id.attr,
        NULL
};

static const struct attribute_group serio_device_attr_group = {
        .attrs  = serio_device_attrs,
};

static const struct attribute_group *serio_device_attr_groups[] = {
        &serio_id_attr_group,
        &serio_device_attr_group,
        NULL
};

static void serio_release_port(struct device *dev)
{
        struct serio *serio = to_serio_port(dev);

        kfree(serio);
        module_put(THIS_MODULE);
}

/*
 * Prepare serio port for registration.
 */
static void serio_init_port(struct serio *serio)
{
        static atomic_t serio_no = ATOMIC_INIT(-1);

        __module_get(THIS_MODULE);

        INIT_LIST_HEAD(&serio->node);
        INIT_LIST_HEAD(&serio->child_node);
        INIT_LIST_HEAD(&serio->children);
        spin_lock_init(&serio->lock);
        mutex_init(&serio->drv_mutex);
        device_initialize(&serio->dev);
        dev_set_name(&serio->dev, "serio%lu",
                     (unsigned long)atomic_inc_return(&serio_no));
        serio->dev.bus = &serio_bus;
        serio->dev.release = serio_release_port;
        serio->dev.groups = serio_device_attr_groups;
        if (serio->parent) {
                serio->dev.parent = &serio->parent->dev;
                serio->depth = serio->parent->depth + 1;
        } else
                serio->depth = 0;
        lockdep_set_subclass(&serio->lock, serio->depth);
}

/*
 * Complete serio port registration.
 * Driver core will attempt to find appropriate driver for the port.
 */
static void serio_add_port(struct serio *serio)
{
        struct serio *parent = serio->parent;
        int error;

        if (parent) {
                serio_pause_rx(parent);
                list_add_tail(&serio->child_node, &parent->children);
                serio_continue_rx(parent);
        }

        list_add_tail(&serio->node, &serio_list);

        if (serio->start)
                serio->start(serio);

        error = device_add(&serio->dev);
        if (error)
                dev_err(&serio->dev,
                        "device_add() failed for %s (%s), error: %d\n",
                        serio->phys, serio->name, error);
}

/*
 * serio_destroy_port() completes unregistration process and removes
 * port from the system
 */
static void serio_destroy_port(struct serio *serio)
{
        struct serio *child;

        while ((child = serio_get_pending_child(serio)) != NULL) {
                serio_remove_pending_events(child);
                put_device(&child->dev);
        }

        if (serio->stop)
                serio->stop(serio);

        if (serio->parent) {
                serio_pause_rx(serio->parent);
                list_del_init(&serio->child_node);
                serio_continue_rx(serio->parent);
                serio->parent = NULL;
        }

        if (device_is_registered(&serio->dev))
                device_del(&serio->dev);

        list_del_init(&serio->node);
        serio_remove_pending_events(serio);
        put_device(&serio->dev);
}

/*
 * Reconnect serio port (re-initialize attached device).
 * If reconnect fails (old device is no longer attached or
 * there was no device to begin with) we do a full rescan in
 * the hope of finding a driver for the port.
 */
static int serio_reconnect_port(struct serio *serio)
{
        int error = serio_reconnect_driver(serio);

        if (error) {
                serio_disconnect_port(serio);
                serio_find_driver(serio);
        }

        return error;
}

/*
 * Reconnect serio port and all its children (re-initialize attached
 * devices).
 */
static void serio_reconnect_subtree(struct serio *root)
{
        struct serio *s = root;
        int error;

        do {
                error = serio_reconnect_port(s);
                if (!error) {
                        /*
                         * Reconnect was successful, move on to do the
                         * first child.
                         */
                        if (!list_empty(&s->children)) {
                                s = list_first_entry(&s->children,
                                                     struct serio, child_node);
                                continue;
                        }
                }

                /*
                 * Either it was a leaf node or reconnect failed and it
                 * became a leaf node. Continue reconnecting starting with
                 * the next sibling of the parent node.
                 */
                while (s != root) {
                        struct serio *parent = s->parent;

                        if (!list_is_last(&s->child_node, &parent->children)) {
                                s = list_entry(s->child_node.next,
                                               struct serio, child_node);
                                break;
                        }

                        s = parent;
                }
        } while (s != root);
}

/*
 * serio_disconnect_port() unbinds a port from its driver. As a side effect
 * all child ports are unbound and destroyed.
 */
static void serio_disconnect_port(struct serio *serio)
{
        struct serio *s = serio;

        /*
         * Child ports should be disconnected and destroyed
         * first; we travel the tree in depth-first order.
         */
        while (!list_empty(&serio->children)) {

                /* Locate a leaf */
                while (!list_empty(&s->children))
                        s = list_first_entry(&s->children,
                                             struct serio, child_node);

                /*
                 * Prune this leaf node unless it is the one we
                 * started with.
                 */
                if (s != serio) {
                        struct serio *parent = s->parent;

                        device_release_driver(&s->dev);
                        serio_destroy_port(s);

                        s = parent;
                }
        }

        /*
         * OK, no children left, now disconnect this port.
         */
        device_release_driver(&serio->dev);
}

void serio_rescan(struct serio *serio)
{
        serio_queue_event(serio, NULL, SERIO_RESCAN_PORT);
}
EXPORT_SYMBOL(serio_rescan);

void serio_reconnect(struct serio *serio)
{
        serio_queue_event(serio, NULL, SERIO_RECONNECT_SUBTREE);
}
EXPORT_SYMBOL(serio_reconnect);

/*
 * Submits register request to kseriod for subsequent execution.
 * Note that port registration is always asynchronous.
 */
void __serio_register_port(struct serio *serio, struct module *owner)
{
        serio_init_port(serio);
        serio_queue_event(serio, owner, SERIO_REGISTER_PORT);
}
EXPORT_SYMBOL(__serio_register_port);
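
/*
 * Usage sketch (hypothetical port driver, not part of this file): the
 * driver allocates a struct serio, fills in the identity and callbacks,
 * and calls serio_register_port(), the <linux/serio.h> wrapper that
 * invokes __serio_register_port(serio, THIS_MODULE):
 *
 *	struct serio *serio = kzalloc(sizeof(*serio), GFP_KERNEL);
 *
 *	if (!serio)
 *		return -ENOMEM;
 *
 *	serio->id.type = SERIO_8042;
 *	serio->write = example_write;		(hypothetical TX callback)
 *	strscpy(serio->name, "Example port", sizeof(serio->name));
 *	strscpy(serio->phys, "example/serio0", sizeof(serio->phys));
 *	serio_register_port(serio);		(completion is asynchronous)
 *
 * The matching teardown is serio_unregister_port(), after which the
 * structure is freed by serio_release_port().
 */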

/*
 * Synchronously unregisters serio port.
 */
void serio_unregister_port(struct serio *serio)
{
        mutex_lock(&serio_mutex);
        serio_disconnect_port(serio);
        serio_destroy_port(serio);
        mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_port);

/*
 * Safely unregisters child ports if they are present.
 */
void serio_unregister_child_port(struct serio *serio)
{
        struct serio *s, *next;

        mutex_lock(&serio_mutex);
        list_for_each_entry_safe(s, next, &serio->children, child_node) {
                serio_disconnect_port(s);
                serio_destroy_port(s);
        }
        mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_child_port);


/*
 * Serio driver operations
 */

static ssize_t description_show(struct device_driver *drv, char *buf)
{
        struct serio_driver *driver = to_serio_driver(drv);
        return sprintf(buf, "%s\n", driver->description ? driver->description : "(none)");
}
static DRIVER_ATTR_RO(description);

static ssize_t bind_mode_show(struct device_driver *drv, char *buf)
{
        struct serio_driver *serio_drv = to_serio_driver(drv);
        return sprintf(buf, "%s\n", serio_drv->manual_bind ? "manual" : "auto");
}

static ssize_t bind_mode_store(struct device_driver *drv, const char *buf, size_t count)
{
        struct serio_driver *serio_drv = to_serio_driver(drv);
        int retval;

        retval = count;
        if (!strncmp(buf, "manual", count)) {
                serio_drv->manual_bind = true;
        } else if (!strncmp(buf, "auto", count)) {
                serio_drv->manual_bind = false;
        } else {
                retval = -EINVAL;
        }

        return retval;
}
static DRIVER_ATTR_RW(bind_mode);

static struct attribute *serio_driver_attrs[] = {
        &driver_attr_description.attr,
        &driver_attr_bind_mode.attr,
        NULL,
};
ATTRIBUTE_GROUPS(serio_driver);

static int serio_driver_probe(struct device *dev)
{
        struct serio *serio = to_serio_port(dev);
        struct serio_driver *drv = to_serio_driver(dev->driver);

        return serio_connect_driver(serio, drv);
}

static void serio_driver_remove(struct device *dev)
{
        struct serio *serio = to_serio_port(dev);

        serio_disconnect_driver(serio);
}

static void serio_cleanup(struct serio *serio)
{
        mutex_lock(&serio->drv_mutex);
        if (serio->drv && serio->drv->cleanup)
                serio->drv->cleanup(serio);
        mutex_unlock(&serio->drv_mutex);
}

static void serio_shutdown(struct device *dev)
{
        struct serio *serio = to_serio_port(dev);

        serio_cleanup(serio);
}

static void serio_attach_driver(struct serio_driver *drv)
{
        int error;

        error = driver_attach(&drv->driver);
        if (error)
                pr_warn("driver_attach() failed for %s with error %d\n",
                        drv->driver.name, error);
}

int __serio_register_driver(struct serio_driver *drv, struct module *owner, const char *mod_name)
{
        bool manual_bind = drv->manual_bind;
        int error;

        drv->driver.bus = &serio_bus;
        drv->driver.owner = owner;
        drv->driver.mod_name = mod_name;

        /*
         * Temporarily disable automatic binding because probing
         * takes a long time and we are better off doing it in kseriod
         */
        drv->manual_bind = true;

        error = driver_register(&drv->driver);
        if (error) {
                pr_err("driver_register() failed for %s, error: %d\n",
                       drv->driver.name, error);
                return error;
        }

        /*
         * Restore original bind mode and let kseriod bind the
         * driver to free ports
         */
        if (!manual_bind) {
                drv->manual_bind = false;
                error = serio_queue_event(drv, NULL, SERIO_ATTACH_DRIVER);
                if (error) {
                        driver_unregister(&drv->driver);
                        return error;
                }
        }

        return 0;
}
EXPORT_SYMBOL(__serio_register_driver);
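
/*
 * Usage sketch (hypothetical client driver): a minimal serio driver
 * supplies an id_table, an interrupt() handler and connect()/disconnect()
 * methods built around serio_open()/serio_close(), and registers itself
 * with the module_serio_driver() helper from <linux/serio.h>.
 * example_serio_ids and example_interrupt are assumed to be defined
 * elsewhere by that driver.
 *
 *	static int example_connect(struct serio *serio, struct serio_driver *drv)
 *	{
 *		return serio_open(serio, drv);
 *	}
 *
 *	static void example_disconnect(struct serio *serio)
 *	{
 *		serio_close(serio);
 *	}
 *
 *	static struct serio_driver example_drv = {
 *		.driver		= { .name = "example" },
 *		.description	= "Example serio driver",
 *		.id_table	= example_serio_ids,
 *		.interrupt	= example_interrupt,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *	};
 *	module_serio_driver(example_drv);
 */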

void serio_unregister_driver(struct serio_driver *drv)
{
        struct serio *serio;

        mutex_lock(&serio_mutex);

        drv->manual_bind = true;	/* so serio_find_driver ignores it */
        serio_remove_pending_events(drv);

start_over:
        list_for_each_entry(serio, &serio_list, node) {
                if (serio->drv == drv) {
                        serio_disconnect_port(serio);
                        serio_find_driver(serio);
                        /* we could've deleted some ports, restart */
                        goto start_over;
                }
        }

        driver_unregister(&drv->driver);
        mutex_unlock(&serio_mutex);
}
EXPORT_SYMBOL(serio_unregister_driver);

static void serio_set_drv(struct serio *serio, struct serio_driver *drv)
{
        serio_pause_rx(serio);
        serio->drv = drv;
        serio_continue_rx(serio);
}

static int serio_bus_match(struct device *dev, struct device_driver *drv)
{
        struct serio *serio = to_serio_port(dev);
        struct serio_driver *serio_drv = to_serio_driver(drv);

        if (serio->manual_bind || serio_drv->manual_bind)
                return 0;

        return serio_match_port(serio_drv->id_table, serio);
}

#define SERIO_ADD_UEVENT_VAR(fmt, val...)				\
        do {								\
                int err = add_uevent_var(env, fmt, val);		\
                if (err)						\
                        return err;					\
        } while (0)

static int serio_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
        const struct serio *serio;

        if (!dev)
                return -ENODEV;

        serio = to_serio_port(dev);

        SERIO_ADD_UEVENT_VAR("SERIO_TYPE=%02x", serio->id.type);
        SERIO_ADD_UEVENT_VAR("SERIO_PROTO=%02x", serio->id.proto);
        SERIO_ADD_UEVENT_VAR("SERIO_ID=%02x", serio->id.id);
        SERIO_ADD_UEVENT_VAR("SERIO_EXTRA=%02x", serio->id.extra);

        SERIO_ADD_UEVENT_VAR("MODALIAS=serio:ty%02Xpr%02Xid%02Xex%02X",
                             serio->id.type, serio->id.proto, serio->id.id, serio->id.extra);

        if (serio->firmware_id[0])
                SERIO_ADD_UEVENT_VAR("SERIO_FIRMWARE_ID=%s",
                                     serio->firmware_id);

        return 0;
}
#undef SERIO_ADD_UEVENT_VAR

#ifdef CONFIG_PM
static int serio_suspend(struct device *dev)
{
        struct serio *serio = to_serio_port(dev);

        serio_cleanup(serio);

        return 0;
}

static int serio_resume(struct device *dev)
{
        struct serio *serio = to_serio_port(dev);
        int error = -ENOENT;

        mutex_lock(&serio->drv_mutex);
        if (serio->drv && serio->drv->fast_reconnect) {
                error = serio->drv->fast_reconnect(serio);
                if (error && error != -ENOENT)
                        dev_warn(dev, "fast reconnect failed with error %d\n",
                                 error);
        }
        mutex_unlock(&serio->drv_mutex);

        if (error) {
                /*
                 * Driver reconnect can take a while, so better let
                 * kseriod deal with it.
                 */
                serio_queue_event(serio, NULL, SERIO_RECONNECT_PORT);
        }

        return 0;
}

static const struct dev_pm_ops serio_pm_ops = {
        .suspend        = serio_suspend,
        .resume         = serio_resume,
        .poweroff       = serio_suspend,
        .restore        = serio_resume,
};
#endif /* CONFIG_PM */

/* called from serio_driver->connect/disconnect methods under serio_mutex */
int serio_open(struct serio *serio, struct serio_driver *drv)
{
        serio_set_drv(serio, drv);

        if (serio->open && serio->open(serio)) {
                serio_set_drv(serio, NULL);
                return -1;
        }
        return 0;
}
EXPORT_SYMBOL(serio_open);

/* called from serio_driver->connect/disconnect methods under serio_mutex */
void serio_close(struct serio *serio)
{
        if (serio->close)
                serio->close(serio);

        serio_set_drv(serio, NULL);
}
EXPORT_SYMBOL(serio_close);

irqreturn_t serio_interrupt(struct serio *serio,
                            unsigned char data, unsigned int dfl)
{
        unsigned long flags;
        irqreturn_t ret = IRQ_NONE;

        spin_lock_irqsave(&serio->lock, flags);

        if (likely(serio->drv)) {
                ret = serio->drv->interrupt(serio, data, dfl);
        } else if (!dfl && device_is_registered(&serio->dev)) {
                serio_rescan(serio);
                ret = IRQ_HANDLED;
        }

        spin_unlock_irqrestore(&serio->lock, flags);

        return ret;
}
EXPORT_SYMBOL(serio_interrupt);
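
/*
 * Usage sketch (hypothetical port driver): received bytes are pushed to
 * the bound client driver from the port's own interrupt handler, with
 * line errors reported through the dfl flags (SERIO_PARITY,
 * SERIO_TIMEOUT).  example_read_data() is a placeholder for the
 * hardware-specific register read.
 *
 *	static irqreturn_t example_isr(int irq, void *dev_id)
 *	{
 *		struct serio *serio = dev_id;
 *		unsigned char data = example_read_data();
 *
 *		return serio_interrupt(serio, data, 0);
 *	}
 */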

const struct bus_type serio_bus = {
        .name           = "serio",
        .drv_groups     = serio_driver_groups,
        .match          = serio_bus_match,
        .uevent         = serio_uevent,
        .probe          = serio_driver_probe,
        .remove         = serio_driver_remove,
        .shutdown       = serio_shutdown,
#ifdef CONFIG_PM
        .pm             = &serio_pm_ops,
#endif
};
EXPORT_SYMBOL(serio_bus);

static int __init serio_init(void)
{
        int error;

        error = bus_register(&serio_bus);
        if (error) {
                pr_err("Failed to register serio bus, error: %d\n", error);
                return error;
        }

        return 0;
}

static void __exit serio_exit(void)
{
        bus_unregister(&serio_bus);

        /*
         * There should not be any outstanding events but work may
         * still be scheduled so simply cancel it.
         */
        cancel_work_sync(&serio_event_work);
}

subsys_initcall(serio_init);
module_exit(serio_exit);