1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * The input core
4 *
5 * Copyright (c) 1999-2002 Vojtech Pavlik
6 */
7
8
9#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
10
11#include <linux/init.h>
12#include <linux/types.h>
13#include <linux/idr.h>
14#include <linux/input/mt.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/random.h>
18#include <linux/major.h>
19#include <linux/proc_fs.h>
20#include <linux/sched.h>
21#include <linux/seq_file.h>
22#include <linux/poll.h>
23#include <linux/device.h>
24#include <linux/kstrtox.h>
25#include <linux/mutex.h>
26#include <linux/rcupdate.h>
27#include "input-compat.h"
28#include "input-core-private.h"
29#include "input-poller.h"
30
31MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
32MODULE_DESCRIPTION("Input core");
33MODULE_LICENSE("GPL");
34
35#define INPUT_MAX_CHAR_DEVICES 1024
36#define INPUT_FIRST_DYNAMIC_DEV 256
37static DEFINE_IDA(input_ida);
38
39static LIST_HEAD(input_dev_list);
40static LIST_HEAD(input_handler_list);
41
/*
 * input_mutex protects access to both input_dev_list and input_handler_list.
 * This also causes input_[un]register_device and input_[un]register_handler
 * to be mutually exclusive, which simplifies locking in drivers implementing
 * input handlers.
 */
48static DEFINE_MUTEX(input_mutex);
49
50static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
51
52static const unsigned int input_max_code[EV_CNT] = {
53 [EV_KEY] = KEY_MAX,
54 [EV_REL] = REL_MAX,
55 [EV_ABS] = ABS_MAX,
56 [EV_MSC] = MSC_MAX,
57 [EV_SW] = SW_MAX,
58 [EV_LED] = LED_MAX,
59 [EV_SND] = SND_MAX,
60 [EV_FF] = FF_MAX,
61};
62
63static inline int is_event_supported(unsigned int code,
64 unsigned long *bm, unsigned int max)
65{
66 return code <= max && test_bit(code, bm);
67}
68
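/*
 * Worked example of the defuzz bands below (an illustration, not code
 * from this file): with fuzz = 8 and old_val = 100, a new value strictly
 * inside (96, 104) is reported as 100 (no change); inside (92, 108) it is
 * smoothed to (3 * 100 + value) / 4, so 106 becomes 101; inside (84, 116)
 * it is averaged to (100 + value) / 2; anything further away passes
 * through unchanged.
 */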
69static int input_defuzz_abs_event(int value, int old_val, int fuzz)
70{
71 if (fuzz) {
72 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
73 return old_val;
74
75 if (value > old_val - fuzz && value < old_val + fuzz)
76 return (old_val * 3 + value) / 4;
77
78 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
79 return (old_val + value) / 2;
80 }
81
82 return value;
83}
84
85static void input_start_autorepeat(struct input_dev *dev, int code)
86{
87 if (test_bit(EV_REP, dev->evbit) &&
88 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
89 dev->timer.function) {
90 dev->repeat_key = code;
91 mod_timer(&dev->timer,
92 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
93 }
94}
95
96static void input_stop_autorepeat(struct input_dev *dev)
97{
98 del_timer(&dev->timer);
99}
100
/*
 * Pass the values to a single handle: first run them through the
 * handler's filter, if one is defined, and then deliver whatever was not
 * filtered out via the handler's events()/event() callback. This function
 * is called with dev->event_lock held and interrupts disabled.
 */
106static unsigned int input_to_handler(struct input_handle *handle,
107 struct input_value *vals, unsigned int count)
108{
109 struct input_handler *handler = handle->handler;
110 struct input_value *end = vals;
111 struct input_value *v;
112
113 if (handler->filter) {
114 for (v = vals; v != vals + count; v++) {
115 if (handler->filter(handle, v->type, v->code, v->value))
116 continue;
117 if (end != v)
118 *end = *v;
119 end++;
120 }
121 count = end - vals;
122 }
123
124 if (!count)
125 return 0;
126
127 if (handler->events)
128 handler->events(handle, vals, count);
129 else if (handler->event)
130 for (v = vals; v != vals + count; v++)
131 handler->event(handle, v->type, v->code, v->value);
132
133 return count;
134}
135
136/*
137 * Pass values first through all filters and then, if event has not been
138 * filtered out, through all open handles. This function is called with
139 * dev->event_lock held and interrupts disabled.
140 */
141static void input_pass_values(struct input_dev *dev,
142 struct input_value *vals, unsigned int count)
143{
144 struct input_handle *handle;
145 struct input_value *v;
146
147 lockdep_assert_held(&dev->event_lock);
148
149 if (!count)
150 return;
151
152 rcu_read_lock();
153
154 handle = rcu_dereference(dev->grab);
155 if (handle) {
156 count = input_to_handler(handle, vals, count);
157 } else {
158 list_for_each_entry_rcu(handle, &dev->h_list, d_node)
159 if (handle->open) {
160 count = input_to_handler(handle, vals, count);
161 if (!count)
162 break;
163 }
164 }
165
166 rcu_read_unlock();
167
168 /* trigger auto repeat for key events */
169 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
170 for (v = vals; v != vals + count; v++) {
171 if (v->type == EV_KEY && v->value != 2) {
172 if (v->value)
173 input_start_autorepeat(dev, v->code);
174 else
175 input_stop_autorepeat(dev);
176 }
177 }
178 }
179}
180
181#define INPUT_IGNORE_EVENT 0
182#define INPUT_PASS_TO_HANDLERS 1
183#define INPUT_PASS_TO_DEVICE 2
184#define INPUT_SLOT 4
185#define INPUT_FLUSH 8
186#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
187
188static int input_handle_abs_event(struct input_dev *dev,
189 unsigned int code, int *pval)
190{
191 struct input_mt *mt = dev->mt;
192 bool is_mt_event;
193 int *pold;
194
195 if (code == ABS_MT_SLOT) {
196 /*
197 * "Stage" the event; we'll flush it later, when we
198 * get actual touch data.
199 */
200 if (mt && *pval >= 0 && *pval < mt->num_slots)
201 mt->slot = *pval;
202
203 return INPUT_IGNORE_EVENT;
204 }
205
206 is_mt_event = input_is_mt_value(code);
207
208 if (!is_mt_event) {
209 pold = &dev->absinfo[code].value;
210 } else if (mt) {
211 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
212 } else {
213 /*
214 * Bypass filtering for multi-touch events when
215 * not employing slots.
216 */
217 pold = NULL;
218 }
219
220 if (pold) {
221 *pval = input_defuzz_abs_event(*pval, *pold,
222 dev->absinfo[code].fuzz);
223 if (*pold == *pval)
224 return INPUT_IGNORE_EVENT;
225
226 *pold = *pval;
227 }
228
229 /* Flush pending "slot" event */
230 if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
231 input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
232 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
233 }
234
235 return INPUT_PASS_TO_HANDLERS;
236}
237
238static int input_get_disposition(struct input_dev *dev,
239 unsigned int type, unsigned int code, int *pval)
240{
241 int disposition = INPUT_IGNORE_EVENT;
242 int value = *pval;
243
	/* filter out events from inhibited devices */
245 if (dev->inhibited)
246 return INPUT_IGNORE_EVENT;
247
248 switch (type) {
249
250 case EV_SYN:
251 switch (code) {
252 case SYN_CONFIG:
253 disposition = INPUT_PASS_TO_ALL;
254 break;
255
256 case SYN_REPORT:
257 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
258 break;
259 case SYN_MT_REPORT:
260 disposition = INPUT_PASS_TO_HANDLERS;
261 break;
262 }
263 break;
264
265 case EV_KEY:
266 if (is_event_supported(code, dev->keybit, KEY_MAX)) {
267
268 /* auto-repeat bypasses state updates */
269 if (value == 2) {
270 disposition = INPUT_PASS_TO_HANDLERS;
271 break;
272 }
273
274 if (!!test_bit(code, dev->key) != !!value) {
275
276 __change_bit(code, dev->key);
277 disposition = INPUT_PASS_TO_HANDLERS;
278 }
279 }
280 break;
281
282 case EV_SW:
283 if (is_event_supported(code, dev->swbit, SW_MAX) &&
284 !!test_bit(code, dev->sw) != !!value) {
285
286 __change_bit(code, dev->sw);
287 disposition = INPUT_PASS_TO_HANDLERS;
288 }
289 break;
290
291 case EV_ABS:
292 if (is_event_supported(code, dev->absbit, ABS_MAX))
293 disposition = input_handle_abs_event(dev, code, &value);
294
295 break;
296
297 case EV_REL:
298 if (is_event_supported(code, dev->relbit, REL_MAX) && value)
299 disposition = INPUT_PASS_TO_HANDLERS;
300
301 break;
302
303 case EV_MSC:
304 if (is_event_supported(code, dev->mscbit, MSC_MAX))
305 disposition = INPUT_PASS_TO_ALL;
306
307 break;
308
309 case EV_LED:
310 if (is_event_supported(code, dev->ledbit, LED_MAX) &&
311 !!test_bit(code, dev->led) != !!value) {
312
313 __change_bit(code, dev->led);
314 disposition = INPUT_PASS_TO_ALL;
315 }
316 break;
317
318 case EV_SND:
319 if (is_event_supported(code, dev->sndbit, SND_MAX)) {
320
321 if (!!test_bit(code, dev->snd) != !!value)
322 __change_bit(code, dev->snd);
323 disposition = INPUT_PASS_TO_ALL;
324 }
325 break;
326
327 case EV_REP:
328 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
329 dev->rep[code] = value;
330 disposition = INPUT_PASS_TO_ALL;
331 }
332 break;
333
334 case EV_FF:
335 if (value >= 0)
336 disposition = INPUT_PASS_TO_ALL;
337 break;
338
339 case EV_PWR:
340 disposition = INPUT_PASS_TO_ALL;
341 break;
342 }
343
344 *pval = value;
345 return disposition;
346}
347
348static void input_event_dispose(struct input_dev *dev, int disposition,
349 unsigned int type, unsigned int code, int value)
350{
351 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
352 dev->event(dev, type, code, value);
353
354 if (!dev->vals)
355 return;
356
357 if (disposition & INPUT_PASS_TO_HANDLERS) {
358 struct input_value *v;
359
360 if (disposition & INPUT_SLOT) {
361 v = &dev->vals[dev->num_vals++];
362 v->type = EV_ABS;
363 v->code = ABS_MT_SLOT;
364 v->value = dev->mt->slot;
365 }
366
367 v = &dev->vals[dev->num_vals++];
368 v->type = type;
369 v->code = code;
370 v->value = value;
371 }
372
373 if (disposition & INPUT_FLUSH) {
374 if (dev->num_vals >= 2)
375 input_pass_values(dev, dev->vals, dev->num_vals);
376 dev->num_vals = 0;
		/*
		 * Reset the timestamp on flush so we won't end up
		 * with a stale one. Note we only need to reset the
		 * monotonic one as we use its presence when deciding
		 * whether to generate a synthetic timestamp.
		 */
383 dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
384 } else if (dev->num_vals >= dev->max_vals - 2) {
385 dev->vals[dev->num_vals++] = input_value_sync;
386 input_pass_values(dev, dev->vals, dev->num_vals);
387 dev->num_vals = 0;
388 }
389}
390
391void input_handle_event(struct input_dev *dev,
392 unsigned int type, unsigned int code, int value)
393{
394 int disposition;
395
396 lockdep_assert_held(&dev->event_lock);
397
398 disposition = input_get_disposition(dev, type, code, &value);
399 if (disposition != INPUT_IGNORE_EVENT) {
400 if (type != EV_SYN)
401 add_input_randomness(type, code, value);
402
403 input_event_dispose(dev, disposition, type, code, value);
404 }
405}
406
407/**
408 * input_event() - report new input event
409 * @dev: device that generated the event
410 * @type: type of the event
411 * @code: event code
412 * @value: value of the event
413 *
414 * This function should be used by drivers implementing various input
415 * devices to report input events. See also input_inject_event().
416 *
417 * NOTE: input_event() may be safely used right after input device was
418 * allocated with input_allocate_device(), even before it is registered
419 * with input_register_device(), but the event will not reach any of the
420 * input handlers. Such early invocation of input_event() may be used
421 * to 'seed' initial state of a switch or initial position of absolute
422 * axis, etc.
423 */
424void input_event(struct input_dev *dev,
425 unsigned int type, unsigned int code, int value)
426{
427 unsigned long flags;
428
429 if (is_event_supported(type, dev->evbit, EV_MAX)) {
430
431 spin_lock_irqsave(&dev->event_lock, flags);
432 input_handle_event(dev, type, code, value);
433 spin_unlock_irqrestore(&dev->event_lock, flags);
434 }
435}
436EXPORT_SYMBOL(input_event);
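/*
 * Usage sketch (not part of this file): a hypothetical driver interrupt
 * handler reporting a key through the input_report_key()/input_sync()
 * wrappers from <linux/input.h>, which boil down to input_event() calls.
 * The "mykbd" structure and its gpiod are assumptions for illustration.
 *
 *	static irqreturn_t mykbd_isr(int irq, void *data)
 *	{
 *		struct mykbd *mykbd = data;
 *		bool pressed = gpiod_get_value(mykbd->gpiod);
 *
 *		input_report_key(mykbd->input, KEY_ENTER, pressed);
 *		input_sync(mykbd->input);
 *
 *		return IRQ_HANDLED;
 *	}
 */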
437
438/**
439 * input_inject_event() - send input event from input handler
440 * @handle: input handle to send event through
441 * @type: type of the event
442 * @code: event code
443 * @value: value of the event
444 *
 * Similar to input_event() but will ignore the event if the device is
 * "grabbed" and the handle injecting the event is not the one that owns
 * the device.
448 */
449void input_inject_event(struct input_handle *handle,
450 unsigned int type, unsigned int code, int value)
451{
452 struct input_dev *dev = handle->dev;
453 struct input_handle *grab;
454 unsigned long flags;
455
456 if (is_event_supported(type, dev->evbit, EV_MAX)) {
457 spin_lock_irqsave(&dev->event_lock, flags);
458
459 rcu_read_lock();
460 grab = rcu_dereference(dev->grab);
461 if (!grab || grab == handle)
462 input_handle_event(dev, type, code, value);
463 rcu_read_unlock();
464
465 spin_unlock_irqrestore(&dev->event_lock, flags);
466 }
467}
468EXPORT_SYMBOL(input_inject_event);
469
470/**
471 * input_alloc_absinfo - allocates array of input_absinfo structs
472 * @dev: the input device emitting absolute events
473 *
 * If the absinfo array the caller asked for is already allocated, this
 * function will not do anything.
476 */
477void input_alloc_absinfo(struct input_dev *dev)
478{
479 if (dev->absinfo)
480 return;
481
482 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
483 if (!dev->absinfo) {
484 dev_err(dev->dev.parent ?: &dev->dev,
485 "%s: unable to allocate memory\n", __func__);
486 /*
487 * We will handle this allocation failure in
488 * input_register_device() when we refuse to register input
489 * device with ABS bits but without absinfo.
490 */
491 }
492}
493EXPORT_SYMBOL(input_alloc_absinfo);
494
495void input_set_abs_params(struct input_dev *dev, unsigned int axis,
496 int min, int max, int fuzz, int flat)
497{
498 struct input_absinfo *absinfo;
499
500 __set_bit(EV_ABS, dev->evbit);
501 __set_bit(axis, dev->absbit);
502
503 input_alloc_absinfo(dev);
504 if (!dev->absinfo)
505 return;
506
507 absinfo = &dev->absinfo[axis];
508 absinfo->minimum = min;
509 absinfo->maximum = max;
510 absinfo->fuzz = fuzz;
511 absinfo->flat = flat;
512}
513EXPORT_SYMBOL(input_set_abs_params);
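/*
 * Usage sketch (illustration only): a touchscreen driver advertising a
 * 0..4095 coordinate range with a small fuzz so that the defuzzing above
 * filters hardware noise; "ts" is an assumed driver-private structure.
 *
 *	input_set_abs_params(ts->input, ABS_X, 0, 4095, 4, 0);
 *	input_set_abs_params(ts->input, ABS_Y, 0, 4095, 4, 0);
 *	input_set_abs_params(ts->input, ABS_PRESSURE, 0, 255, 0, 0);
 */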
514
515/**
516 * input_copy_abs - Copy absinfo from one input_dev to another
517 * @dst: Destination input device to copy the abs settings to
518 * @dst_axis: ABS_* value selecting the destination axis
519 * @src: Source input device to copy the abs settings from
520 * @src_axis: ABS_* value selecting the source axis
521 *
522 * Set absinfo for the selected destination axis by copying it from
523 * the specified source input device's source axis.
 * This is useful, e.g., to set up a pen/stylus input device for combined
525 * touchscreen/pen hardware where the pen uses the same coordinates as
526 * the touchscreen.
527 */
528void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
529 const struct input_dev *src, unsigned int src_axis)
530{
531 /* src must have EV_ABS and src_axis set */
532 if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
533 test_bit(src_axis, src->absbit))))
534 return;
535
536 /*
537 * input_alloc_absinfo() may have failed for the source. Our caller is
538 * expected to catch this when registering the input devices, which may
539 * happen after the input_copy_abs() call.
540 */
541 if (!src->absinfo)
542 return;
543
544 input_set_capability(dst, EV_ABS, dst_axis);
545 if (!dst->absinfo)
546 return;
547
548 dst->absinfo[dst_axis] = src->absinfo[src_axis];
549}
550EXPORT_SYMBOL(input_copy_abs);
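/*
 * Usage sketch (illustration only): a driver that creates both a
 * touchscreen device and a pen device can make the pen use the
 * touchscreen's X/Y ranges; "pen" and "touch" are assumed input devices.
 *
 *	input_copy_abs(pen, ABS_X, touch, ABS_X);
 *	input_copy_abs(pen, ABS_Y, touch, ABS_Y);
 */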
551
552/**
553 * input_grab_device - grabs device for exclusive use
554 * @handle: input handle that wants to own the device
555 *
 * When a device is grabbed by an input handle, all events generated by
 * the device are delivered only to this handle. Also, events injected
 * by other input handles are ignored while the device is grabbed.
559 */
560int input_grab_device(struct input_handle *handle)
561{
562 struct input_dev *dev = handle->dev;
563 int retval;
564
565 retval = mutex_lock_interruptible(&dev->mutex);
566 if (retval)
567 return retval;
568
569 if (dev->grab) {
570 retval = -EBUSY;
571 goto out;
572 }
573
574 rcu_assign_pointer(dev->grab, handle);
575
576 out:
577 mutex_unlock(&dev->mutex);
578 return retval;
579}
580EXPORT_SYMBOL(input_grab_device);
581
582static void __input_release_device(struct input_handle *handle)
583{
584 struct input_dev *dev = handle->dev;
585 struct input_handle *grabber;
586
587 grabber = rcu_dereference_protected(dev->grab,
588 lockdep_is_held(&dev->mutex));
589 if (grabber == handle) {
590 rcu_assign_pointer(dev->grab, NULL);
591 /* Make sure input_pass_values() notices that grab is gone */
592 synchronize_rcu();
593
594 list_for_each_entry(handle, &dev->h_list, d_node)
595 if (handle->open && handle->handler->start)
596 handle->handler->start(handle);
597 }
598}
599
600/**
601 * input_release_device - release previously grabbed device
602 * @handle: input handle that owns the device
603 *
604 * Releases previously grabbed device so that other input handles can
605 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
608 */
609void input_release_device(struct input_handle *handle)
610{
611 struct input_dev *dev = handle->dev;
612
613 mutex_lock(&dev->mutex);
614 __input_release_device(handle);
615 mutex_unlock(&dev->mutex);
616}
617EXPORT_SYMBOL(input_release_device);
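/*
 * Usage sketch (illustration only): a handler taking and later dropping
 * an exclusive grab, for instance in response to an ioctl; the
 * "client->handle" field is an assumption for illustration.
 *
 *	error = input_grab_device(&client->handle);
 *	if (error)
 *		return error;
 *	...
 *	input_release_device(&client->handle);
 */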
618
619/**
620 * input_open_device - open input device
621 * @handle: handle through which device is being accessed
622 *
623 * This function should be called by input handlers when they
 * want to start receiving events from a given input device.
625 */
626int input_open_device(struct input_handle *handle)
627{
628 struct input_dev *dev = handle->dev;
629 int retval;
630
631 retval = mutex_lock_interruptible(&dev->mutex);
632 if (retval)
633 return retval;
634
635 if (dev->going_away) {
636 retval = -ENODEV;
637 goto out;
638 }
639
640 handle->open++;
641
642 if (dev->users++ || dev->inhibited) {
643 /*
644 * Device is already opened and/or inhibited,
645 * so we can exit immediately and report success.
646 */
647 goto out;
648 }
649
650 if (dev->open) {
651 retval = dev->open(dev);
652 if (retval) {
653 dev->users--;
654 handle->open--;
655 /*
656 * Make sure we are not delivering any more events
657 * through this handle
658 */
659 synchronize_rcu();
660 goto out;
661 }
662 }
663
664 if (dev->poller)
665 input_dev_poller_start(dev->poller);
666
667 out:
668 mutex_unlock(&dev->mutex);
669 return retval;
670}
671EXPORT_SYMBOL(input_open_device);
672
673int input_flush_device(struct input_handle *handle, struct file *file)
674{
675 struct input_dev *dev = handle->dev;
676 int retval;
677
678 retval = mutex_lock_interruptible(&dev->mutex);
679 if (retval)
680 return retval;
681
682 if (dev->flush)
683 retval = dev->flush(dev, file);
684
685 mutex_unlock(&dev->mutex);
686 return retval;
687}
688EXPORT_SYMBOL(input_flush_device);
689
690/**
691 * input_close_device - close input device
692 * @handle: handle through which device is being accessed
693 *
694 * This function should be called by input handlers when they
 * want to stop receiving events from a given input device.
696 */
697void input_close_device(struct input_handle *handle)
698{
699 struct input_dev *dev = handle->dev;
700
701 mutex_lock(&dev->mutex);
702
703 __input_release_device(handle);
704
705 if (!dev->inhibited && !--dev->users) {
706 if (dev->poller)
707 input_dev_poller_stop(dev->poller);
708 if (dev->close)
709 dev->close(dev);
710 }
711
712 if (!--handle->open) {
713 /*
714 * synchronize_rcu() makes sure that input_pass_values()
715 * completed and that no more input events are delivered
716 * through this handle
717 */
718 synchronize_rcu();
719 }
720
721 mutex_unlock(&dev->mutex);
722}
723EXPORT_SYMBOL(input_close_device);
724
725/*
726 * Simulate keyup events for all keys that are marked as pressed.
727 * The function must be called with dev->event_lock held.
728 */
729static bool input_dev_release_keys(struct input_dev *dev)
730{
731 bool need_sync = false;
732 int code;
733
734 lockdep_assert_held(&dev->event_lock);
735
736 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
737 for_each_set_bit(code, dev->key, KEY_CNT) {
738 input_handle_event(dev, EV_KEY, code, 0);
739 need_sync = true;
740 }
741 }
742
743 return need_sync;
744}
745
746/*
747 * Prepare device for unregistering
748 */
749static void input_disconnect_device(struct input_dev *dev)
750{
751 struct input_handle *handle;
752
753 /*
754 * Mark device as going away. Note that we take dev->mutex here
755 * not to protect access to dev->going_away but rather to ensure
756 * that there are no threads in the middle of input_open_device()
757 */
758 mutex_lock(&dev->mutex);
759 dev->going_away = true;
760 mutex_unlock(&dev->mutex);
761
762 spin_lock_irq(&dev->event_lock);
763
764 /*
765 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here, but they will not
	 * reach any handlers.
769 */
770 if (input_dev_release_keys(dev))
771 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
772
773 list_for_each_entry(handle, &dev->h_list, d_node)
774 handle->open = 0;
775
776 spin_unlock_irq(&dev->event_lock);
777}
778
779/**
780 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
781 * @ke: keymap entry containing scancode to be converted.
782 * @scancode: pointer to the location where converted scancode should
783 * be stored.
784 *
 * This function is used to convert scancode stored in &struct input_keymap_entry
786 * into scalar form understood by legacy keymap handling methods. These
787 * methods expect scancodes to be represented as 'unsigned int'.
788 */
789int input_scancode_to_scalar(const struct input_keymap_entry *ke,
790 unsigned int *scancode)
791{
792 switch (ke->len) {
793 case 1:
794 *scancode = *((u8 *)ke->scancode);
795 break;
796
797 case 2:
798 *scancode = *((u16 *)ke->scancode);
799 break;
800
801 case 4:
802 *scancode = *((u32 *)ke->scancode);
803 break;
804
805 default:
806 return -EINVAL;
807 }
808
809 return 0;
810}
811EXPORT_SYMBOL(input_scancode_to_scalar);
812
813/*
814 * Those routines handle the default case where no [gs]etkeycode() is
815 * defined. In this case, an array indexed by the scancode is used.
816 */
817
818static unsigned int input_fetch_keycode(struct input_dev *dev,
819 unsigned int index)
820{
821 switch (dev->keycodesize) {
822 case 1:
823 return ((u8 *)dev->keycode)[index];
824
825 case 2:
826 return ((u16 *)dev->keycode)[index];
827
828 default:
829 return ((u32 *)dev->keycode)[index];
830 }
831}
832
833static int input_default_getkeycode(struct input_dev *dev,
834 struct input_keymap_entry *ke)
835{
836 unsigned int index;
837 int error;
838
839 if (!dev->keycodesize)
840 return -EINVAL;
841
842 if (ke->flags & INPUT_KEYMAP_BY_INDEX)
843 index = ke->index;
844 else {
845 error = input_scancode_to_scalar(ke, &index);
846 if (error)
847 return error;
848 }
849
850 if (index >= dev->keycodemax)
851 return -EINVAL;
852
853 ke->keycode = input_fetch_keycode(dev, index);
854 ke->index = index;
855 ke->len = sizeof(index);
856 memcpy(ke->scancode, &index, sizeof(index));
857
858 return 0;
859}
860
861static int input_default_setkeycode(struct input_dev *dev,
862 const struct input_keymap_entry *ke,
863 unsigned int *old_keycode)
864{
865 unsigned int index;
866 int error;
867 int i;
868
869 if (!dev->keycodesize)
870 return -EINVAL;
871
872 if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
873 index = ke->index;
874 } else {
875 error = input_scancode_to_scalar(ke, &index);
876 if (error)
877 return error;
878 }
879
880 if (index >= dev->keycodemax)
881 return -EINVAL;
882
883 if (dev->keycodesize < sizeof(ke->keycode) &&
884 (ke->keycode >> (dev->keycodesize * 8)))
885 return -EINVAL;
886
887 switch (dev->keycodesize) {
888 case 1: {
889 u8 *k = (u8 *)dev->keycode;
890 *old_keycode = k[index];
891 k[index] = ke->keycode;
892 break;
893 }
894 case 2: {
895 u16 *k = (u16 *)dev->keycode;
896 *old_keycode = k[index];
897 k[index] = ke->keycode;
898 break;
899 }
900 default: {
901 u32 *k = (u32 *)dev->keycode;
902 *old_keycode = k[index];
903 k[index] = ke->keycode;
904 break;
905 }
906 }
907
908 if (*old_keycode <= KEY_MAX) {
909 __clear_bit(*old_keycode, dev->keybit);
910 for (i = 0; i < dev->keycodemax; i++) {
911 if (input_fetch_keycode(dev, i) == *old_keycode) {
912 __set_bit(*old_keycode, dev->keybit);
913 /* Setting the bit twice is useless, so break */
914 break;
915 }
916 }
917 }
918
919 __set_bit(ke->keycode, dev->keybit);
920 return 0;
921}
922
923/**
924 * input_get_keycode - retrieve keycode currently mapped to a given scancode
 * @dev: input device whose keymap is being queried
 * @ke: keymap entry
 *
 * This function should be called by anyone interested in retrieving the
 * current keymap. Presently evdev handlers use it.
930 */
931int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
932{
933 unsigned long flags;
934 int retval;
935
936 spin_lock_irqsave(&dev->event_lock, flags);
937 retval = dev->getkeycode(dev, ke);
938 spin_unlock_irqrestore(&dev->event_lock, flags);
939
940 return retval;
941}
942EXPORT_SYMBOL(input_get_keycode);
943
944/**
945 * input_set_keycode - attribute a keycode to a given scancode
 * @dev: input device whose keymap is being updated
 * @ke: new keymap entry
 *
 * This function should be called by anyone needing to update the current
 * keymap. Presently keyboard and evdev handlers use it.
951 */
952int input_set_keycode(struct input_dev *dev,
953 const struct input_keymap_entry *ke)
954{
955 unsigned long flags;
956 unsigned int old_keycode;
957 int retval;
958
959 if (ke->keycode > KEY_MAX)
960 return -EINVAL;
961
962 spin_lock_irqsave(&dev->event_lock, flags);
963
964 retval = dev->setkeycode(dev, ke, &old_keycode);
965 if (retval)
966 goto out;
967
968 /* Make sure KEY_RESERVED did not get enabled. */
969 __clear_bit(KEY_RESERVED, dev->keybit);
970
971 /*
972 * Simulate keyup event if keycode is not present
973 * in the keymap anymore
974 */
975 if (old_keycode > KEY_MAX) {
976 dev_warn(dev->dev.parent ?: &dev->dev,
977 "%s: got too big old keycode %#x\n",
978 __func__, old_keycode);
979 } else if (test_bit(EV_KEY, dev->evbit) &&
980 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
981 __test_and_clear_bit(old_keycode, dev->key)) {
982 /*
983 * We have to use input_event_dispose() here directly instead
984 * of input_handle_event() because the key we want to release
985 * here is considered no longer supported by the device and
986 * input_handle_event() will ignore it.
987 */
988 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
989 EV_KEY, old_keycode, 0);
990 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
991 EV_SYN, SYN_REPORT, 1);
992 }
993
994 out:
995 spin_unlock_irqrestore(&dev->event_lock, flags);
996
997 return retval;
998}
999EXPORT_SYMBOL(input_set_keycode);
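/*
 * Usage sketch (illustration only): remapping the 4-byte scancode 0x1e
 * to KEY_A. The struct input_keymap_entry layout comes from
 * <uapi/linux/input.h>; "dev" is an assumed registered input device.
 *
 *	struct input_keymap_entry ke = {
 *		.len = sizeof(u32),
 *		.keycode = KEY_A,
 *	};
 *	u32 scancode = 0x1e;
 *	int error;
 *
 *	memcpy(ke.scancode, &scancode, sizeof(scancode));
 *	error = input_set_keycode(dev, &ke);
 */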
1000
1001bool input_match_device_id(const struct input_dev *dev,
1002 const struct input_device_id *id)
1003{
1004 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
1005 if (id->bustype != dev->id.bustype)
1006 return false;
1007
1008 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
1009 if (id->vendor != dev->id.vendor)
1010 return false;
1011
1012 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
1013 if (id->product != dev->id.product)
1014 return false;
1015
1016 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
1017 if (id->version != dev->id.version)
1018 return false;
1019
1020 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
1021 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
1022 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
1023 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
1024 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
1025 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
1026 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
1027 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
1028 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
1029 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
1030 return false;
1031 }
1032
1033 return true;
1034}
1035EXPORT_SYMBOL(input_match_device_id);
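/*
 * Sketch of an id table (illustration only, "myhandler" is a made-up
 * name): a handler interested in any device capable of EV_KEY events
 * would match with a single-entry table terminated by an empty record,
 * since input_match_device() below stops at flags == 0 && driver_info == 0.
 *
 *	static const struct input_device_id myhandler_ids[] = {
 *		{
 *			.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
 *			.evbit = { BIT_MASK(EV_KEY) },
 *		},
 *		{ }
 *	};
 */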
1036
1037static const struct input_device_id *input_match_device(struct input_handler *handler,
1038 struct input_dev *dev)
1039{
1040 const struct input_device_id *id;
1041
1042 for (id = handler->id_table; id->flags || id->driver_info; id++) {
1043 if (input_match_device_id(dev, id) &&
1044 (!handler->match || handler->match(handler, dev))) {
1045 return id;
1046 }
1047 }
1048
1049 return NULL;
1050}
1051
1052static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
1053{
1054 const struct input_device_id *id;
1055 int error;
1056
1057 id = input_match_device(handler, dev);
1058 if (!id)
1059 return -ENODEV;
1060
1061 error = handler->connect(handler, dev, id);
1062 if (error && error != -ENODEV)
1063 pr_err("failed to attach handler %s to device %s, error: %d\n",
1064 handler->name, kobject_name(&dev->dev.kobj), error);
1065
1066 return error;
1067}
1068
1069#ifdef CONFIG_COMPAT
1070
1071static int input_bits_to_string(char *buf, int buf_size,
1072 unsigned long bits, bool skip_empty)
1073{
1074 int len = 0;
1075
1076 if (in_compat_syscall()) {
1077 u32 dword = bits >> 32;
1078 if (dword || !skip_empty)
1079 len += snprintf(buf, buf_size, "%x ", dword);
1080
1081 dword = bits & 0xffffffffUL;
1082 if (dword || !skip_empty || len)
1083 len += snprintf(buf + len, max(buf_size - len, 0),
1084 "%x", dword);
1085 } else {
1086 if (bits || !skip_empty)
1087 len += snprintf(buf, buf_size, "%lx", bits);
1088 }
1089
1090 return len;
1091}
1092
1093#else /* !CONFIG_COMPAT */
1094
1095static int input_bits_to_string(char *buf, int buf_size,
1096 unsigned long bits, bool skip_empty)
1097{
1098 return bits || !skip_empty ?
1099 snprintf(buf, buf_size, "%lx", bits) : 0;
1100}
1101
1102#endif
1103
1104#ifdef CONFIG_PROC_FS
1105
1106static struct proc_dir_entry *proc_bus_input_dir;
1107static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1108static int input_devices_state;
1109
1110static inline void input_wakeup_procfs_readers(void)
1111{
1112 input_devices_state++;
1113 wake_up(&input_devices_poll_wait);
1114}
1115
1116static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
1117{
1118 poll_wait(file, &input_devices_poll_wait, wait);
1119 if (file->f_version != input_devices_state) {
1120 file->f_version = input_devices_state;
1121 return EPOLLIN | EPOLLRDNORM;
1122 }
1123
1124 return 0;
1125}
1126
1127union input_seq_state {
1128 struct {
1129 unsigned short pos;
1130 bool mutex_acquired;
1131 };
1132 void *p;
1133};
1134
1135static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1136{
1137 union input_seq_state *state = (union input_seq_state *)&seq->private;
1138 int error;
1139
1140 /* We need to fit into seq->private pointer */
1141 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1142
1143 error = mutex_lock_interruptible(&input_mutex);
1144 if (error) {
1145 state->mutex_acquired = false;
1146 return ERR_PTR(error);
1147 }
1148
1149 state->mutex_acquired = true;
1150
1151 return seq_list_start(&input_dev_list, *pos);
1152}
1153
1154static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1155{
1156 return seq_list_next(v, &input_dev_list, pos);
1157}
1158
1159static void input_seq_stop(struct seq_file *seq, void *v)
1160{
1161 union input_seq_state *state = (union input_seq_state *)&seq->private;
1162
1163 if (state->mutex_acquired)
1164 mutex_unlock(&input_mutex);
1165}
1166
1167static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1168 unsigned long *bitmap, int max)
1169{
1170 int i;
1171 bool skip_empty = true;
1172 char buf[18];
1173
1174 seq_printf(seq, "B: %s=", name);
1175
1176 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1177 if (input_bits_to_string(buf, sizeof(buf),
1178 bitmap[i], skip_empty)) {
1179 skip_empty = false;
1180 seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1181 }
1182 }
1183
1184 /*
1185 * If no output was produced print a single 0.
1186 */
1187 if (skip_empty)
1188 seq_putc(seq, '0');
1189
1190 seq_putc(seq, '\n');
1191}
1192
1193static int input_devices_seq_show(struct seq_file *seq, void *v)
1194{
1195 struct input_dev *dev = container_of(v, struct input_dev, node);
1196 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1197 struct input_handle *handle;
1198
1199 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1200 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1201
1202 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1203 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1204 seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1205 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1206 seq_puts(seq, "H: Handlers=");
1207
1208 list_for_each_entry(handle, &dev->h_list, d_node)
1209 seq_printf(seq, "%s ", handle->name);
1210 seq_putc(seq, '\n');
1211
1212 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1213
1214 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1215 if (test_bit(EV_KEY, dev->evbit))
1216 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1217 if (test_bit(EV_REL, dev->evbit))
1218 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1219 if (test_bit(EV_ABS, dev->evbit))
1220 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1221 if (test_bit(EV_MSC, dev->evbit))
1222 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1223 if (test_bit(EV_LED, dev->evbit))
1224 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1225 if (test_bit(EV_SND, dev->evbit))
1226 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1227 if (test_bit(EV_FF, dev->evbit))
1228 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1229 if (test_bit(EV_SW, dev->evbit))
1230 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1231
1232 seq_putc(seq, '\n');
1233
1234 kfree(path);
1235 return 0;
1236}
1237
1238static const struct seq_operations input_devices_seq_ops = {
1239 .start = input_devices_seq_start,
1240 .next = input_devices_seq_next,
1241 .stop = input_seq_stop,
1242 .show = input_devices_seq_show,
1243};
1244
1245static int input_proc_devices_open(struct inode *inode, struct file *file)
1246{
1247 return seq_open(file, &input_devices_seq_ops);
1248}
1249
1250static const struct proc_ops input_devices_proc_ops = {
1251 .proc_open = input_proc_devices_open,
1252 .proc_poll = input_proc_devices_poll,
1253 .proc_read = seq_read,
1254 .proc_lseek = seq_lseek,
1255 .proc_release = seq_release,
1256};
1257
1258static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1259{
1260 union input_seq_state *state = (union input_seq_state *)&seq->private;
1261 int error;
1262
1263 /* We need to fit into seq->private pointer */
1264 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1265
1266 error = mutex_lock_interruptible(&input_mutex);
1267 if (error) {
1268 state->mutex_acquired = false;
1269 return ERR_PTR(error);
1270 }
1271
1272 state->mutex_acquired = true;
1273 state->pos = *pos;
1274
1275 return seq_list_start(&input_handler_list, *pos);
1276}
1277
1278static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1279{
1280 union input_seq_state *state = (union input_seq_state *)&seq->private;
1281
1282 state->pos = *pos + 1;
1283 return seq_list_next(v, &input_handler_list, pos);
1284}
1285
1286static int input_handlers_seq_show(struct seq_file *seq, void *v)
1287{
1288 struct input_handler *handler = container_of(v, struct input_handler, node);
1289 union input_seq_state *state = (union input_seq_state *)&seq->private;
1290
1291 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1292 if (handler->filter)
1293 seq_puts(seq, " (filter)");
1294 if (handler->legacy_minors)
1295 seq_printf(seq, " Minor=%d", handler->minor);
1296 seq_putc(seq, '\n');
1297
1298 return 0;
1299}
1300
1301static const struct seq_operations input_handlers_seq_ops = {
1302 .start = input_handlers_seq_start,
1303 .next = input_handlers_seq_next,
1304 .stop = input_seq_stop,
1305 .show = input_handlers_seq_show,
1306};
1307
1308static int input_proc_handlers_open(struct inode *inode, struct file *file)
1309{
1310 return seq_open(file, &input_handlers_seq_ops);
1311}
1312
1313static const struct proc_ops input_handlers_proc_ops = {
1314 .proc_open = input_proc_handlers_open,
1315 .proc_read = seq_read,
1316 .proc_lseek = seq_lseek,
1317 .proc_release = seq_release,
1318};
1319
1320static int __init input_proc_init(void)
1321{
1322 struct proc_dir_entry *entry;
1323
1324 proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1325 if (!proc_bus_input_dir)
1326 return -ENOMEM;
1327
1328 entry = proc_create("devices", 0, proc_bus_input_dir,
1329 &input_devices_proc_ops);
1330 if (!entry)
1331 goto fail1;
1332
1333 entry = proc_create("handlers", 0, proc_bus_input_dir,
1334 &input_handlers_proc_ops);
1335 if (!entry)
1336 goto fail2;
1337
1338 return 0;
1339
1340 fail2: remove_proc_entry("devices", proc_bus_input_dir);
1341 fail1: remove_proc_entry("bus/input", NULL);
1342 return -ENOMEM;
1343}
1344
1345static void input_proc_exit(void)
1346{
1347 remove_proc_entry("devices", proc_bus_input_dir);
1348 remove_proc_entry("handlers", proc_bus_input_dir);
1349 remove_proc_entry("bus/input", NULL);
1350}
1351
1352#else /* !CONFIG_PROC_FS */
1353static inline void input_wakeup_procfs_readers(void) { }
1354static inline int input_proc_init(void) { return 0; }
1355static inline void input_proc_exit(void) { }
1356#endif
1357
1358#define INPUT_DEV_STRING_ATTR_SHOW(name) \
1359static ssize_t input_dev_show_##name(struct device *dev, \
1360 struct device_attribute *attr, \
1361 char *buf) \
1362{ \
1363 struct input_dev *input_dev = to_input_dev(dev); \
1364 \
1365 return scnprintf(buf, PAGE_SIZE, "%s\n", \
1366 input_dev->name ? input_dev->name : ""); \
1367} \
1368static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1369
1370INPUT_DEV_STRING_ATTR_SHOW(name);
1371INPUT_DEV_STRING_ATTR_SHOW(phys);
1372INPUT_DEV_STRING_ATTR_SHOW(uniq);
1373
1374static int input_print_modalias_bits(char *buf, int size,
1375 char name, unsigned long *bm,
1376 unsigned int min_bit, unsigned int max_bit)
1377{
1378 int len = 0, i;
1379
1380 len += snprintf(buf, max(size, 0), "%c", name);
1381 for (i = min_bit; i < max_bit; i++)
1382 if (bm[BIT_WORD(i)] & BIT_MASK(i))
1383 len += snprintf(buf + len, max(size - len, 0), "%X,", i);
1384 return len;
1385}
1386
1387static int input_print_modalias(char *buf, int size, struct input_dev *id,
1388 int add_cr)
1389{
1390 int len;
1391
1392 len = snprintf(buf, max(size, 0),
1393 "input:b%04Xv%04Xp%04Xe%04X-",
1394 id->id.bustype, id->id.vendor,
1395 id->id.product, id->id.version);
1396
1397 len += input_print_modalias_bits(buf + len, size - len,
1398 'e', id->evbit, 0, EV_MAX);
1399 len += input_print_modalias_bits(buf + len, size - len,
1400 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1401 len += input_print_modalias_bits(buf + len, size - len,
1402 'r', id->relbit, 0, REL_MAX);
1403 len += input_print_modalias_bits(buf + len, size - len,
1404 'a', id->absbit, 0, ABS_MAX);
1405 len += input_print_modalias_bits(buf + len, size - len,
1406 'm', id->mscbit, 0, MSC_MAX);
1407 len += input_print_modalias_bits(buf + len, size - len,
1408 'l', id->ledbit, 0, LED_MAX);
1409 len += input_print_modalias_bits(buf + len, size - len,
1410 's', id->sndbit, 0, SND_MAX);
1411 len += input_print_modalias_bits(buf + len, size - len,
1412 'f', id->ffbit, 0, FF_MAX);
1413 len += input_print_modalias_bits(buf + len, size - len,
1414 'w', id->swbit, 0, SW_MAX);
1415
1416 if (add_cr)
1417 len += snprintf(buf + len, max(size - len, 0), "\n");
1418
1419 return len;
1420}
1421
1422static ssize_t input_dev_show_modalias(struct device *dev,
1423 struct device_attribute *attr,
1424 char *buf)
1425{
1426 struct input_dev *id = to_input_dev(dev);
1427 ssize_t len;
1428
1429 len = input_print_modalias(buf, PAGE_SIZE, id, 1);
1430
1431 return min_t(int, len, PAGE_SIZE);
1432}
1433static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1434
1435static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1436 int max, int add_cr);
1437
1438static ssize_t input_dev_show_properties(struct device *dev,
1439 struct device_attribute *attr,
1440 char *buf)
1441{
1442 struct input_dev *input_dev = to_input_dev(dev);
1443 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1444 INPUT_PROP_MAX, true);
1445 return min_t(int, len, PAGE_SIZE);
1446}
1447static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1448
1449static int input_inhibit_device(struct input_dev *dev);
1450static int input_uninhibit_device(struct input_dev *dev);
1451
1452static ssize_t inhibited_show(struct device *dev,
1453 struct device_attribute *attr,
1454 char *buf)
1455{
1456 struct input_dev *input_dev = to_input_dev(dev);
1457
1458 return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
1459}
1460
1461static ssize_t inhibited_store(struct device *dev,
1462 struct device_attribute *attr, const char *buf,
1463 size_t len)
1464{
1465 struct input_dev *input_dev = to_input_dev(dev);
1466 ssize_t rv;
1467 bool inhibited;
1468
1469 if (kstrtobool(buf, &inhibited))
1470 return -EINVAL;
1471
1472 if (inhibited)
1473 rv = input_inhibit_device(input_dev);
1474 else
1475 rv = input_uninhibit_device(input_dev);
1476
1477 if (rv != 0)
1478 return rv;
1479
1480 return len;
1481}
1482
1483static DEVICE_ATTR_RW(inhibited);
1484
1485static struct attribute *input_dev_attrs[] = {
1486 &dev_attr_name.attr,
1487 &dev_attr_phys.attr,
1488 &dev_attr_uniq.attr,
1489 &dev_attr_modalias.attr,
1490 &dev_attr_properties.attr,
1491 &dev_attr_inhibited.attr,
1492 NULL
1493};
1494
1495static const struct attribute_group input_dev_attr_group = {
1496 .attrs = input_dev_attrs,
1497};
1498
1499#define INPUT_DEV_ID_ATTR(name) \
1500static ssize_t input_dev_show_id_##name(struct device *dev, \
1501 struct device_attribute *attr, \
1502 char *buf) \
1503{ \
1504 struct input_dev *input_dev = to_input_dev(dev); \
1505 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
1506} \
1507static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1508
1509INPUT_DEV_ID_ATTR(bustype);
1510INPUT_DEV_ID_ATTR(vendor);
1511INPUT_DEV_ID_ATTR(product);
1512INPUT_DEV_ID_ATTR(version);
1513
1514static struct attribute *input_dev_id_attrs[] = {
1515 &dev_attr_bustype.attr,
1516 &dev_attr_vendor.attr,
1517 &dev_attr_product.attr,
1518 &dev_attr_version.attr,
1519 NULL
1520};
1521
1522static const struct attribute_group input_dev_id_attr_group = {
1523 .name = "id",
1524 .attrs = input_dev_id_attrs,
1525};
1526
1527static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1528 int max, int add_cr)
1529{
1530 int i;
1531 int len = 0;
1532 bool skip_empty = true;
1533
1534 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1535 len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1536 bitmap[i], skip_empty);
1537 if (len) {
1538 skip_empty = false;
1539 if (i > 0)
1540 len += snprintf(buf + len, max(buf_size - len, 0), " ");
1541 }
1542 }
1543
1544 /*
1545 * If no output was produced print a single 0.
1546 */
1547 if (len == 0)
1548 len = snprintf(buf, buf_size, "%d", 0);
1549
1550 if (add_cr)
1551 len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1552
1553 return len;
1554}
1555
1556#define INPUT_DEV_CAP_ATTR(ev, bm) \
1557static ssize_t input_dev_show_cap_##bm(struct device *dev, \
1558 struct device_attribute *attr, \
1559 char *buf) \
1560{ \
1561 struct input_dev *input_dev = to_input_dev(dev); \
1562 int len = input_print_bitmap(buf, PAGE_SIZE, \
1563 input_dev->bm##bit, ev##_MAX, \
1564 true); \
1565 return min_t(int, len, PAGE_SIZE); \
1566} \
1567static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1568
1569INPUT_DEV_CAP_ATTR(EV, ev);
1570INPUT_DEV_CAP_ATTR(KEY, key);
1571INPUT_DEV_CAP_ATTR(REL, rel);
1572INPUT_DEV_CAP_ATTR(ABS, abs);
1573INPUT_DEV_CAP_ATTR(MSC, msc);
1574INPUT_DEV_CAP_ATTR(LED, led);
1575INPUT_DEV_CAP_ATTR(SND, snd);
1576INPUT_DEV_CAP_ATTR(FF, ff);
1577INPUT_DEV_CAP_ATTR(SW, sw);
1578
1579static struct attribute *input_dev_caps_attrs[] = {
1580 &dev_attr_ev.attr,
1581 &dev_attr_key.attr,
1582 &dev_attr_rel.attr,
1583 &dev_attr_abs.attr,
1584 &dev_attr_msc.attr,
1585 &dev_attr_led.attr,
1586 &dev_attr_snd.attr,
1587 &dev_attr_ff.attr,
1588 &dev_attr_sw.attr,
1589 NULL
1590};
1591
1592static const struct attribute_group input_dev_caps_attr_group = {
1593 .name = "capabilities",
1594 .attrs = input_dev_caps_attrs,
1595};
1596
1597static const struct attribute_group *input_dev_attr_groups[] = {
1598 &input_dev_attr_group,
1599 &input_dev_id_attr_group,
1600 &input_dev_caps_attr_group,
1601 &input_poller_attribute_group,
1602 NULL
1603};
1604
1605static void input_dev_release(struct device *device)
1606{
1607 struct input_dev *dev = to_input_dev(device);
1608
1609 input_ff_destroy(dev);
1610 input_mt_destroy_slots(dev);
1611 kfree(dev->poller);
1612 kfree(dev->absinfo);
1613 kfree(dev->vals);
1614 kfree(dev);
1615
1616 module_put(THIS_MODULE);
1617}
1618
1619/*
1620 * Input uevent interface - loading event handlers based on
1621 * device bitfields.
1622 */
1623static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1624 const char *name, unsigned long *bitmap, int max)
1625{
1626 int len;
1627
1628 if (add_uevent_var(env, "%s", name))
1629 return -ENOMEM;
1630
1631 len = input_print_bitmap(&env->buf[env->buflen - 1],
1632 sizeof(env->buf) - env->buflen,
1633 bitmap, max, false);
1634 if (len >= (sizeof(env->buf) - env->buflen))
1635 return -ENOMEM;
1636
1637 env->buflen += len;
1638 return 0;
1639}
1640
1641static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1642 struct input_dev *dev)
1643{
1644 int len;
1645
1646 if (add_uevent_var(env, "MODALIAS="))
1647 return -ENOMEM;
1648
1649 len = input_print_modalias(&env->buf[env->buflen - 1],
1650 sizeof(env->buf) - env->buflen,
1651 dev, 0);
1652 if (len >= (sizeof(env->buf) - env->buflen))
1653 return -ENOMEM;
1654
1655 env->buflen += len;
1656 return 0;
1657}
1658
1659#define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
1660 do { \
1661 int err = add_uevent_var(env, fmt, val); \
1662 if (err) \
1663 return err; \
1664 } while (0)
1665
1666#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \
1667 do { \
1668 int err = input_add_uevent_bm_var(env, name, bm, max); \
1669 if (err) \
1670 return err; \
1671 } while (0)
1672
1673#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
1674 do { \
1675 int err = input_add_uevent_modalias_var(env, dev); \
1676 if (err) \
1677 return err; \
1678 } while (0)
1679
1680static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1681{
1682 struct input_dev *dev = to_input_dev(device);
1683
1684 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1685 dev->id.bustype, dev->id.vendor,
1686 dev->id.product, dev->id.version);
1687 if (dev->name)
1688 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1689 if (dev->phys)
1690 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1691 if (dev->uniq)
1692 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1693
1694 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1695
1696 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1697 if (test_bit(EV_KEY, dev->evbit))
1698 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1699 if (test_bit(EV_REL, dev->evbit))
1700 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1701 if (test_bit(EV_ABS, dev->evbit))
1702 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1703 if (test_bit(EV_MSC, dev->evbit))
1704 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1705 if (test_bit(EV_LED, dev->evbit))
1706 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1707 if (test_bit(EV_SND, dev->evbit))
1708 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1709 if (test_bit(EV_FF, dev->evbit))
1710 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1711 if (test_bit(EV_SW, dev->evbit))
1712 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1713
1714 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1715
1716 return 0;
1717}
1718
1719#define INPUT_DO_TOGGLE(dev, type, bits, on) \
1720 do { \
1721 int i; \
1722 bool active; \
1723 \
1724 if (!test_bit(EV_##type, dev->evbit)) \
1725 break; \
1726 \
1727 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \
1728 active = test_bit(i, dev->bits); \
1729 if (!active && !on) \
1730 continue; \
1731 \
1732 dev->event(dev, EV_##type, i, on ? active : 0); \
1733 } \
1734 } while (0)
1735
1736static void input_dev_toggle(struct input_dev *dev, bool activate)
1737{
1738 if (!dev->event)
1739 return;
1740
1741 INPUT_DO_TOGGLE(dev, LED, led, activate);
1742 INPUT_DO_TOGGLE(dev, SND, snd, activate);
1743
1744 if (activate && test_bit(EV_REP, dev->evbit)) {
1745 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1746 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1747 }
1748}
1749
1750/**
1751 * input_reset_device() - reset/restore the state of input device
1752 * @dev: input device whose state needs to be reset
1753 *
 * This function tries to reset the state of an opened input device and
 * bring its internal state and the state of the hardware in sync with
 * each other. We mark all keys as released, restore LED state, repeat
 * rate, etc.
1757 */
1758void input_reset_device(struct input_dev *dev)
1759{
1760 unsigned long flags;
1761
1762 mutex_lock(&dev->mutex);
1763 spin_lock_irqsave(&dev->event_lock, flags);
1764
1765 input_dev_toggle(dev, true);
1766 if (input_dev_release_keys(dev))
1767 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1768
1769 spin_unlock_irqrestore(&dev->event_lock, flags);
1770 mutex_unlock(&dev->mutex);
1771}
1772EXPORT_SYMBOL(input_reset_device);
1773
1774static int input_inhibit_device(struct input_dev *dev)
1775{
1776 mutex_lock(&dev->mutex);
1777
1778 if (dev->inhibited)
1779 goto out;
1780
1781 if (dev->users) {
1782 if (dev->close)
1783 dev->close(dev);
1784 if (dev->poller)
1785 input_dev_poller_stop(dev->poller);
1786 }
1787
1788 spin_lock_irq(&dev->event_lock);
1789 input_mt_release_slots(dev);
1790 input_dev_release_keys(dev);
1791 input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
1792 input_dev_toggle(dev, false);
1793 spin_unlock_irq(&dev->event_lock);
1794
1795 dev->inhibited = true;
1796
1797out:
1798 mutex_unlock(&dev->mutex);
1799 return 0;
1800}
1801
1802static int input_uninhibit_device(struct input_dev *dev)
1803{
1804 int ret = 0;
1805
1806 mutex_lock(&dev->mutex);
1807
1808 if (!dev->inhibited)
1809 goto out;
1810
1811 if (dev->users) {
1812 if (dev->open) {
1813 ret = dev->open(dev);
1814 if (ret)
1815 goto out;
1816 }
1817 if (dev->poller)
1818 input_dev_poller_start(dev->poller);
1819 }
1820
1821 dev->inhibited = false;
1822 spin_lock_irq(&dev->event_lock);
1823 input_dev_toggle(dev, true);
1824 spin_unlock_irq(&dev->event_lock);
1825
1826out:
1827 mutex_unlock(&dev->mutex);
1828 return ret;
1829}
1830
1831#ifdef CONFIG_PM_SLEEP
1832static int input_dev_suspend(struct device *dev)
1833{
1834 struct input_dev *input_dev = to_input_dev(dev);
1835
1836 spin_lock_irq(&input_dev->event_lock);
1837
1838 /*
1839 * Keys that are pressed now are unlikely to be
1840 * still pressed when we resume.
1841 */
1842 if (input_dev_release_keys(input_dev))
1843 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1844
1845 /* Turn off LEDs and sounds, if any are active. */
1846 input_dev_toggle(input_dev, false);
1847
1848 spin_unlock_irq(&input_dev->event_lock);
1849
1850 return 0;
1851}
1852
1853static int input_dev_resume(struct device *dev)
1854{
1855 struct input_dev *input_dev = to_input_dev(dev);
1856
1857 spin_lock_irq(&input_dev->event_lock);
1858
1859 /* Restore state of LEDs and sounds, if any were active. */
1860 input_dev_toggle(input_dev, true);
1861
1862 spin_unlock_irq(&input_dev->event_lock);
1863
1864 return 0;
1865}
1866
1867static int input_dev_freeze(struct device *dev)
1868{
1869 struct input_dev *input_dev = to_input_dev(dev);
1870
1871 spin_lock_irq(&input_dev->event_lock);
1872
1873 /*
1874 * Keys that are pressed now are unlikely to be
1875 * still pressed when we resume.
1876 */
1877 if (input_dev_release_keys(input_dev))
1878 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);
1879
1880 spin_unlock_irq(&input_dev->event_lock);
1881
1882 return 0;
1883}
1884
1885static int input_dev_poweroff(struct device *dev)
1886{
1887 struct input_dev *input_dev = to_input_dev(dev);
1888
1889 spin_lock_irq(&input_dev->event_lock);
1890
1891 /* Turn off LEDs and sounds, if any are active. */
1892 input_dev_toggle(input_dev, false);
1893
1894 spin_unlock_irq(&input_dev->event_lock);
1895
1896 return 0;
1897}
1898
1899static const struct dev_pm_ops input_dev_pm_ops = {
1900 .suspend = input_dev_suspend,
1901 .resume = input_dev_resume,
1902 .freeze = input_dev_freeze,
1903 .poweroff = input_dev_poweroff,
1904 .restore = input_dev_resume,
1905};
#endif /* CONFIG_PM_SLEEP */
1907
1908static const struct device_type input_dev_type = {
1909 .groups = input_dev_attr_groups,
1910 .release = input_dev_release,
1911 .uevent = input_dev_uevent,
1912#ifdef CONFIG_PM_SLEEP
1913 .pm = &input_dev_pm_ops,
1914#endif
1915};
1916
1917static char *input_devnode(const struct device *dev, umode_t *mode)
1918{
1919 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1920}
1921
1922struct class input_class = {
1923 .name = "input",
1924 .devnode = input_devnode,
1925};
1926EXPORT_SYMBOL_GPL(input_class);
1927
1928/**
1929 * input_allocate_device - allocate memory for new input device
1930 *
1931 * Returns prepared struct input_dev or %NULL.
1932 *
1933 * NOTE: Use input_free_device() to free devices that have not been
1934 * registered; input_unregister_device() should be used for already
1935 * registered devices.
1936 */
1937struct input_dev *input_allocate_device(void)
1938{
1939 static atomic_t input_no = ATOMIC_INIT(-1);
1940 struct input_dev *dev;
1941
1942 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1943 if (dev) {
1944 dev->dev.type = &input_dev_type;
1945 dev->dev.class = &input_class;
1946 device_initialize(&dev->dev);
1947 mutex_init(&dev->mutex);
1948 spin_lock_init(&dev->event_lock);
1949 timer_setup(&dev->timer, NULL, 0);
1950 INIT_LIST_HEAD(&dev->h_list);
1951 INIT_LIST_HEAD(&dev->node);
1952
1953 dev_set_name(&dev->dev, "input%lu",
1954 (unsigned long)atomic_inc_return(&input_no));
1955
1956 __module_get(THIS_MODULE);
1957 }
1958
1959 return dev;
1960}
1961EXPORT_SYMBOL(input_allocate_device);
1962
1963struct input_devres {
1964 struct input_dev *input;
1965};
1966
1967static int devm_input_device_match(struct device *dev, void *res, void *data)
1968{
1969 struct input_devres *devres = res;
1970
1971 return devres->input == data;
1972}
1973
1974static void devm_input_device_release(struct device *dev, void *res)
1975{
1976 struct input_devres *devres = res;
1977 struct input_dev *input = devres->input;
1978
1979 dev_dbg(dev, "%s: dropping reference to %s\n",
1980 __func__, dev_name(&input->dev));
1981 input_put_device(input);
1982}
1983
1984/**
1985 * devm_input_allocate_device - allocate managed input device
1986 * @dev: device owning the input device being created
1987 *
1988 * Returns prepared struct input_dev or %NULL.
1989 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed: this is done automatically when the owner device unbinds from
 * its driver (or when binding fails). Once a managed input device is
 * allocated, it is ready to be set up and registered in the same fashion
 * as a regular input device. There are no special
 * devm_input_device_[un]register() variants; the regular ones work with
 * both managed and unmanaged devices.
1998 *
1999 * NOTE: the owner device is set up as parent of input device and users
2000 * should not override it.
2001 */
2002struct input_dev *devm_input_allocate_device(struct device *dev)
2003{
2004 struct input_dev *input;
2005 struct input_devres *devres;
2006
2007 devres = devres_alloc(devm_input_device_release,
2008 sizeof(*devres), GFP_KERNEL);
2009 if (!devres)
2010 return NULL;
2011
2012 input = input_allocate_device();
2013 if (!input) {
2014 devres_free(devres);
2015 return NULL;
2016 }
2017
2018 input->dev.parent = dev;
2019 input->devres_managed = true;
2020
2021 devres->input = input;
2022 devres_add(dev, devres);
2023
2024 return input;
2025}
2026EXPORT_SYMBOL(devm_input_allocate_device);
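/*
 * Usage sketch (illustration only): a typical probe() flow with a
 * managed input device. The platform driver, "example button" name and
 * KEY_POWER capability are assumptions for illustration.
 *
 *	static int mybtn_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "example button";
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		return input_register_device(input);
 *	}
 *
 * No explicit unregister/free is needed; both happen automatically when
 * the owner device unbinds from its driver.
 */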
2027
2028/**
2029 * input_free_device - free memory occupied by input_dev structure
2030 * @dev: input device to free
2031 *
2032 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once the device has been registered,
 * use input_unregister_device(); memory will be freed once the last
 * reference to the device is dropped.
2036 *
2037 * Device should be allocated by input_allocate_device().
2038 *
2039 * NOTE: If there are references to the input device then memory
2040 * will not be freed until last reference is dropped.
2041 */
2042void input_free_device(struct input_dev *dev)
2043{
2044 if (dev) {
2045 if (dev->devres_managed)
2046 WARN_ON(devres_destroy(dev->dev.parent,
2047 devm_input_device_release,
2048 devm_input_device_match,
2049 dev));
2050 input_put_device(dev);
2051 }
2052}
2053EXPORT_SYMBOL(input_free_device);
2054
2055/**
2056 * input_set_timestamp - set timestamp for input events
2057 * @dev: input device to set timestamp for
2058 * @timestamp: the time at which the event has occurred
2059 * in CLOCK_MONOTONIC
2060 *
2061 * This function is intended to provide to the input system a more
2062 * accurate time of when an event actually occurred. The driver should
2063 * call this function as soon as a timestamp is acquired ensuring
2064 * clock conversions in input_set_timestamp are done correctly.
2065 *
2066 * The system entering suspend state between timestamp acquisition and
2067 * calling input_set_timestamp can result in inaccurate conversions.
2068 */
2069void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
2070{
2071 dev->timestamp[INPUT_CLK_MONO] = timestamp;
2072 dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
2073 dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
2074 TK_OFFS_BOOT);
2075}
2076EXPORT_SYMBOL(input_set_timestamp);
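
/*
 * Example (sketch): the interrupt handler of a hypothetical driver records
 * the moment the hardware event was noticed before reporting it, so that
 * the resulting EV_* events carry an accurate timestamp.  foo_priv and its
 * members are assumptions for illustration only.
 *
 *	static irqreturn_t foo_isr(int irq, void *data)
 *	{
 *		struct foo_priv *priv = data;
 *
 *		input_set_timestamp(priv->input, ktime_get());
 *		input_report_key(priv->input, KEY_POWER, 1);
 *		input_sync(priv->input);
 *		return IRQ_HANDLED;
 *	}
 */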

/**
 * input_get_timestamp - get timestamp for input events
 * @dev: input device to get timestamp from
 *
 * A valid timestamp is a timestamp of non-zero value.
 */
ktime_t *input_get_timestamp(struct input_dev *dev)
{
	const ktime_t invalid_timestamp = ktime_set(0, 0);

	if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
		input_set_timestamp(dev, ktime_get());

	return dev->timestamp;
}
EXPORT_SYMBOL(input_get_timestamp);

/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting the corresponding bit in the appropriate
 * capability bitmap, the function also adjusts dev->evbit.
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	if (type < EV_CNT && input_max_code[type] &&
	    code > input_max_code[type]) {
		pr_err("%s: invalid code %u for type %u\n", __func__, code,
		       type);
		dump_stack();
		return;
	}

	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		input_alloc_absinfo(dev);
		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
		dump_stack();
		return;
	}

	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);
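
/*
 * Example (sketch): declaring the event types and codes a device can
 * produce before registration.  For EV_ABS axes drivers typically use
 * input_set_abs_params() instead, since the axis range is needed as well.
 *
 *	input_set_capability(input, EV_KEY, BTN_LEFT);
 *	input_set_capability(input, EV_KEY, BTN_RIGHT);
 *	input_set_capability(input, EV_REL, REL_X);
 *	input_set_capability(input, EV_REL, REL_Y);
 */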

static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
				sizeof(dev->bits##bit));		\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));

	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event(),
 * which could otherwise cause keys to get "stuck".
 */
static void input_repeat_key(struct timer_list *t)
{
	struct input_dev *dev = from_timer(dev, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	if (!dev->inhibited &&
	    test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {

		input_set_timestamp(dev, ktime_get());
		input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
					msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);
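
/*
 * Example (sketch): a keyboard-like driver that wants autorepeat handled
 * in software, with a non-default rate, sets EV_REP and overrides the
 * defaults that input_register_device() would otherwise install.
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 500, 100);	// 500 ms delay, 100 ms period
 */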

bool input_device_enabled(struct input_dev *dev)
{
	lockdep_assert_held(&dev->mutex);

	return !dev->inhibited && dev->users > 0;
}
EXPORT_SYMBOL_GPL(input_device_enabled);

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers the device with the input core. The device
 * must be allocated with input_allocate_device() and all of its
 * capabilities set up before registering.
 * If this function fails the device must be freed with input_free_device().
 * Once the device has been successfully registered it can be unregistered
 * with input_unregister_device(); input_free_device() should not be
 * called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed; their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * the registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of the managed input device will
 * happen later, when the devres stack is unwound to the point where the
 * device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	unsigned int packet_size;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	dev->max_vals = dev->hint_events_per_packet + 2;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		error = -ENOMEM;
		goto err_devres_free;
	}

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	if (dev->poller)
		input_dev_poller_finalize(dev->poller);

	error = device_add(&dev->dev);
	if (error)
		goto err_free_vals;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;

	list_add_tail(&dev->node, &input_dev_list);

	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_free_vals:
	kfree(dev->vals);
	dev->vals = NULL;
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);
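
/*
 * Example (sketch): typical registration sequence for an unmanaged device.
 * All capabilities are set up first; on failure the device is freed, on
 * success only input_unregister_device() may be used to tear it down.
 * The name and axis ranges below are purely illustrative.
 *
 *	input->name = "Example Accelerometer";
 *	input->id.bustype = BUS_HOST;
 *	input_set_abs_params(input, ABS_X, -512, 511, 4, 8);
 *	input_set_abs_params(input, ABS_Y, -512, 511, 4, 8);
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);
 *		return error;
 *	}
 */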

/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once the device is
 * unregistered the caller should not try to access it as it may get
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
					devm_input_device_unregister,
					devm_input_device_match,
					dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be
		 * done when the second devres action fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;

	INIT_LIST_HEAD(&handler->h_list);

	list_add_tail(&handler->node, &input_handler_list);

	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);
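
/*
 * Example (sketch): minimal shape of a handler registration at module init
 * time.  The id_table and the connect()/disconnect()/event() callbacks are
 * assumed to be defined elsewhere in this hypothetical "foo" handler.
 *
 *	static struct input_handler foo_handler = {
 *		.event		= foo_event,
 *		.connect	= foo_connect,
 *		.disconnect	= foo_disconnect,
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return input_register_handler(&foo_handler);
 *	}
 */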

/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from lists of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles and call @fn for each, passing
 * it @data, and stop when @fn returns a non-zero value. The function uses
 * RCU to traverse the list and therefore may be used in atomic contexts.
 * The @fn callback is invoked from within an RCU critical section and
 * thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}

	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);

/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto the device's
 * and handler's lists so that events can flow through
 * it once it is opened using input_open_device().
 *
 * This function is supposed to be called from handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;

	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;

	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);

	mutex_unlock(&dev->mutex);

	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so a separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);
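
/*
 * Example (sketch): the usual pattern inside a handler's connect() method,
 * which allocates a handle, registers it and then opens the device.  Error
 * labels and the surrounding function body are abbreviated; the handle
 * name is hypothetical.
 *
 *	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *	if (!handle)
 *		return -ENOMEM;
 *
 *	handle->dev = dev;
 *	handle->handler = handler;
 *	handle->name = "foo";
 *
 *	error = input_register_handle(handle);
 *	if (error)
 *		goto err_free_handle;
 *
 *	error = input_open_device(handle);
 *	if (error)
 *		goto err_unregister_handle;
 */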

/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes the input handle from the device's
 * and handler's lists.
 *
 * This function is supposed to be called from handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);

/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of legacy range
 * @allow_dynamic: whether we can also take ID from the dynamic range
 *
 * This function allocates a new device minor from the input major
 * namespace. Callers can request a legacy minor by specifying the
 * @legacy_base and @legacy_num parameters, and whether an ID can be
 * allocated from the dynamic range if there are no free IDs in the
 * legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_simple_get(&input_ida,
					   legacy_base,
					   legacy_base + legacy_num,
					   GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_simple_get(&input_ida,
			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
			      GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);
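
/*
 * Example (sketch, loosely modelled on legacy-minor handlers such as evdev):
 * request a minor from a legacy range first, falling back to the dynamic
 * range.  EXAMPLE_MINOR_BASE and EXAMPLE_MINORS are hypothetical constants.
 *
 *	int minor = input_get_new_minor(EXAMPLE_MINOR_BASE, EXAMPLE_MINORS, true);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	input_free_minor(minor);	// on teardown or error
 */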

/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases a previously allocated input minor so that
 * it can be reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_simple_remove(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d\n", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * The input core
4 *
5 * Copyright (c) 1999-2002 Vojtech Pavlik
6 */
7
8
9#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt
10
11#include <linux/init.h>
12#include <linux/types.h>
13#include <linux/idr.h>
14#include <linux/input/mt.h>
15#include <linux/module.h>
16#include <linux/slab.h>
17#include <linux/random.h>
18#include <linux/major.h>
19#include <linux/proc_fs.h>
20#include <linux/sched.h>
21#include <linux/seq_file.h>
22#include <linux/poll.h>
23#include <linux/device.h>
24#include <linux/mutex.h>
25#include <linux/rcupdate.h>
26#include "input-compat.h"
27#include "input-poller.h"
28
29MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
30MODULE_DESCRIPTION("Input core");
31MODULE_LICENSE("GPL");
32
33#define INPUT_MAX_CHAR_DEVICES 1024
34#define INPUT_FIRST_DYNAMIC_DEV 256
35static DEFINE_IDA(input_ida);
36
37static LIST_HEAD(input_dev_list);
38static LIST_HEAD(input_handler_list);
39
40/*
41 * input_mutex protects access to both input_dev_list and input_handler_list.
42 * This also causes input_[un]register_device and input_[un]register_handler
43 * be mutually exclusive which simplifies locking in drivers implementing
44 * input handlers.
45 */
46static DEFINE_MUTEX(input_mutex);
47
48static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };
49
50static inline int is_event_supported(unsigned int code,
51 unsigned long *bm, unsigned int max)
52{
53 return code <= max && test_bit(code, bm);
54}
55
56static int input_defuzz_abs_event(int value, int old_val, int fuzz)
57{
58 if (fuzz) {
59 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
60 return old_val;
61
62 if (value > old_val - fuzz && value < old_val + fuzz)
63 return (old_val * 3 + value) / 4;
64
65 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
66 return (old_val + value) / 2;
67 }
68
69 return value;
70}
71
72static void input_start_autorepeat(struct input_dev *dev, int code)
73{
74 if (test_bit(EV_REP, dev->evbit) &&
75 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
76 dev->timer.function) {
77 dev->repeat_key = code;
78 mod_timer(&dev->timer,
79 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
80 }
81}
82
83static void input_stop_autorepeat(struct input_dev *dev)
84{
85 del_timer(&dev->timer);
86}
87
88/*
89 * Pass event first through all filters and then, if event has not been
90 * filtered out, through all open handles. This function is called with
91 * dev->event_lock held and interrupts disabled.
92 */
93static unsigned int input_to_handler(struct input_handle *handle,
94 struct input_value *vals, unsigned int count)
95{
96 struct input_handler *handler = handle->handler;
97 struct input_value *end = vals;
98 struct input_value *v;
99
100 if (handler->filter) {
101 for (v = vals; v != vals + count; v++) {
102 if (handler->filter(handle, v->type, v->code, v->value))
103 continue;
104 if (end != v)
105 *end = *v;
106 end++;
107 }
108 count = end - vals;
109 }
110
111 if (!count)
112 return 0;
113
114 if (handler->events)
115 handler->events(handle, vals, count);
116 else if (handler->event)
117 for (v = vals; v != vals + count; v++)
118 handler->event(handle, v->type, v->code, v->value);
119
120 return count;
121}
122
123/*
124 * Pass values first through all filters and then, if event has not been
125 * filtered out, through all open handles. This function is called with
126 * dev->event_lock held and interrupts disabled.
127 */
128static void input_pass_values(struct input_dev *dev,
129 struct input_value *vals, unsigned int count)
130{
131 struct input_handle *handle;
132 struct input_value *v;
133
134 if (!count)
135 return;
136
137 rcu_read_lock();
138
139 handle = rcu_dereference(dev->grab);
140 if (handle) {
141 count = input_to_handler(handle, vals, count);
142 } else {
143 list_for_each_entry_rcu(handle, &dev->h_list, d_node)
144 if (handle->open) {
145 count = input_to_handler(handle, vals, count);
146 if (!count)
147 break;
148 }
149 }
150
151 rcu_read_unlock();
152
153 /* trigger auto repeat for key events */
154 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
155 for (v = vals; v != vals + count; v++) {
156 if (v->type == EV_KEY && v->value != 2) {
157 if (v->value)
158 input_start_autorepeat(dev, v->code);
159 else
160 input_stop_autorepeat(dev);
161 }
162 }
163 }
164}
165
166static void input_pass_event(struct input_dev *dev,
167 unsigned int type, unsigned int code, int value)
168{
169 struct input_value vals[] = { { type, code, value } };
170
171 input_pass_values(dev, vals, ARRAY_SIZE(vals));
172}
173
174/*
175 * Generate software autorepeat event. Note that we take
176 * dev->event_lock here to avoid racing with input_event
177 * which may cause keys get "stuck".
178 */
179static void input_repeat_key(struct timer_list *t)
180{
181 struct input_dev *dev = from_timer(dev, t, timer);
182 unsigned long flags;
183
184 spin_lock_irqsave(&dev->event_lock, flags);
185
186 if (test_bit(dev->repeat_key, dev->key) &&
187 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
188 struct input_value vals[] = {
189 { EV_KEY, dev->repeat_key, 2 },
190 input_value_sync
191 };
192
193 input_set_timestamp(dev, ktime_get());
194 input_pass_values(dev, vals, ARRAY_SIZE(vals));
195
196 if (dev->rep[REP_PERIOD])
197 mod_timer(&dev->timer, jiffies +
198 msecs_to_jiffies(dev->rep[REP_PERIOD]));
199 }
200
201 spin_unlock_irqrestore(&dev->event_lock, flags);
202}
203
204#define INPUT_IGNORE_EVENT 0
205#define INPUT_PASS_TO_HANDLERS 1
206#define INPUT_PASS_TO_DEVICE 2
207#define INPUT_SLOT 4
208#define INPUT_FLUSH 8
209#define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)
210
211static int input_handle_abs_event(struct input_dev *dev,
212 unsigned int code, int *pval)
213{
214 struct input_mt *mt = dev->mt;
215 bool is_mt_event;
216 int *pold;
217
218 if (code == ABS_MT_SLOT) {
219 /*
220 * "Stage" the event; we'll flush it later, when we
221 * get actual touch data.
222 */
223 if (mt && *pval >= 0 && *pval < mt->num_slots)
224 mt->slot = *pval;
225
226 return INPUT_IGNORE_EVENT;
227 }
228
229 is_mt_event = input_is_mt_value(code);
230
231 if (!is_mt_event) {
232 pold = &dev->absinfo[code].value;
233 } else if (mt) {
234 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
235 } else {
236 /*
237 * Bypass filtering for multi-touch events when
238 * not employing slots.
239 */
240 pold = NULL;
241 }
242
243 if (pold) {
244 *pval = input_defuzz_abs_event(*pval, *pold,
245 dev->absinfo[code].fuzz);
246 if (*pold == *pval)
247 return INPUT_IGNORE_EVENT;
248
249 *pold = *pval;
250 }
251
252 /* Flush pending "slot" event */
253 if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
254 input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
255 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
256 }
257
258 return INPUT_PASS_TO_HANDLERS;
259}
260
261static int input_get_disposition(struct input_dev *dev,
262 unsigned int type, unsigned int code, int *pval)
263{
264 int disposition = INPUT_IGNORE_EVENT;
265 int value = *pval;
266
267 switch (type) {
268
269 case EV_SYN:
270 switch (code) {
271 case SYN_CONFIG:
272 disposition = INPUT_PASS_TO_ALL;
273 break;
274
275 case SYN_REPORT:
276 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
277 break;
278 case SYN_MT_REPORT:
279 disposition = INPUT_PASS_TO_HANDLERS;
280 break;
281 }
282 break;
283
284 case EV_KEY:
285 if (is_event_supported(code, dev->keybit, KEY_MAX)) {
286
287 /* auto-repeat bypasses state updates */
288 if (value == 2) {
289 disposition = INPUT_PASS_TO_HANDLERS;
290 break;
291 }
292
293 if (!!test_bit(code, dev->key) != !!value) {
294
295 __change_bit(code, dev->key);
296 disposition = INPUT_PASS_TO_HANDLERS;
297 }
298 }
299 break;
300
301 case EV_SW:
302 if (is_event_supported(code, dev->swbit, SW_MAX) &&
303 !!test_bit(code, dev->sw) != !!value) {
304
305 __change_bit(code, dev->sw);
306 disposition = INPUT_PASS_TO_HANDLERS;
307 }
308 break;
309
310 case EV_ABS:
311 if (is_event_supported(code, dev->absbit, ABS_MAX))
312 disposition = input_handle_abs_event(dev, code, &value);
313
314 break;
315
316 case EV_REL:
317 if (is_event_supported(code, dev->relbit, REL_MAX) && value)
318 disposition = INPUT_PASS_TO_HANDLERS;
319
320 break;
321
322 case EV_MSC:
323 if (is_event_supported(code, dev->mscbit, MSC_MAX))
324 disposition = INPUT_PASS_TO_ALL;
325
326 break;
327
328 case EV_LED:
329 if (is_event_supported(code, dev->ledbit, LED_MAX) &&
330 !!test_bit(code, dev->led) != !!value) {
331
332 __change_bit(code, dev->led);
333 disposition = INPUT_PASS_TO_ALL;
334 }
335 break;
336
337 case EV_SND:
338 if (is_event_supported(code, dev->sndbit, SND_MAX)) {
339
340 if (!!test_bit(code, dev->snd) != !!value)
341 __change_bit(code, dev->snd);
342 disposition = INPUT_PASS_TO_ALL;
343 }
344 break;
345
346 case EV_REP:
347 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
348 dev->rep[code] = value;
349 disposition = INPUT_PASS_TO_ALL;
350 }
351 break;
352
353 case EV_FF:
354 if (value >= 0)
355 disposition = INPUT_PASS_TO_ALL;
356 break;
357
358 case EV_PWR:
359 disposition = INPUT_PASS_TO_ALL;
360 break;
361 }
362
363 *pval = value;
364 return disposition;
365}
366
367static void input_handle_event(struct input_dev *dev,
368 unsigned int type, unsigned int code, int value)
369{
370 int disposition;
371
372 /* filter-out events from inhibited devices */
373 if (dev->inhibited)
374 return;
375
376 disposition = input_get_disposition(dev, type, code, &value);
377 if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
378 add_input_randomness(type, code, value);
379
380 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
381 dev->event(dev, type, code, value);
382
383 if (!dev->vals)
384 return;
385
386 if (disposition & INPUT_PASS_TO_HANDLERS) {
387 struct input_value *v;
388
389 if (disposition & INPUT_SLOT) {
390 v = &dev->vals[dev->num_vals++];
391 v->type = EV_ABS;
392 v->code = ABS_MT_SLOT;
393 v->value = dev->mt->slot;
394 }
395
396 v = &dev->vals[dev->num_vals++];
397 v->type = type;
398 v->code = code;
399 v->value = value;
400 }
401
402 if (disposition & INPUT_FLUSH) {
403 if (dev->num_vals >= 2)
404 input_pass_values(dev, dev->vals, dev->num_vals);
405 dev->num_vals = 0;
406 /*
407 * Reset the timestamp on flush so we won't end up
408 * with a stale one. Note we only need to reset the
409 * monolithic one as we use its presence when deciding
410 * whether to generate a synthetic timestamp.
411 */
412 dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
413 } else if (dev->num_vals >= dev->max_vals - 2) {
414 dev->vals[dev->num_vals++] = input_value_sync;
415 input_pass_values(dev, dev->vals, dev->num_vals);
416 dev->num_vals = 0;
417 }
418
419}
420
421/**
422 * input_event() - report new input event
423 * @dev: device that generated the event
424 * @type: type of the event
425 * @code: event code
426 * @value: value of the event
427 *
428 * This function should be used by drivers implementing various input
429 * devices to report input events. See also input_inject_event().
430 *
431 * NOTE: input_event() may be safely used right after input device was
432 * allocated with input_allocate_device(), even before it is registered
433 * with input_register_device(), but the event will not reach any of the
434 * input handlers. Such early invocation of input_event() may be used
435 * to 'seed' initial state of a switch or initial position of absolute
436 * axis, etc.
437 */
438void input_event(struct input_dev *dev,
439 unsigned int type, unsigned int code, int value)
440{
441 unsigned long flags;
442
443 if (is_event_supported(type, dev->evbit, EV_MAX)) {
444
445 spin_lock_irqsave(&dev->event_lock, flags);
446 input_handle_event(dev, type, code, value);
447 spin_unlock_irqrestore(&dev->event_lock, flags);
448 }
449}
450EXPORT_SYMBOL(input_event);
451
452/**
453 * input_inject_event() - send input event from input handler
454 * @handle: input handle to send event through
455 * @type: type of the event
456 * @code: event code
457 * @value: value of the event
458 *
459 * Similar to input_event() but will ignore event if device is
460 * "grabbed" and handle injecting event is not the one that owns
461 * the device.
462 */
463void input_inject_event(struct input_handle *handle,
464 unsigned int type, unsigned int code, int value)
465{
466 struct input_dev *dev = handle->dev;
467 struct input_handle *grab;
468 unsigned long flags;
469
470 if (is_event_supported(type, dev->evbit, EV_MAX)) {
471 spin_lock_irqsave(&dev->event_lock, flags);
472
473 rcu_read_lock();
474 grab = rcu_dereference(dev->grab);
475 if (!grab || grab == handle)
476 input_handle_event(dev, type, code, value);
477 rcu_read_unlock();
478
479 spin_unlock_irqrestore(&dev->event_lock, flags);
480 }
481}
482EXPORT_SYMBOL(input_inject_event);
483
484/**
485 * input_alloc_absinfo - allocates array of input_absinfo structs
486 * @dev: the input device emitting absolute events
487 *
488 * If the absinfo struct the caller asked for is already allocated, this
489 * functions will not do anything.
490 */
491void input_alloc_absinfo(struct input_dev *dev)
492{
493 if (dev->absinfo)
494 return;
495
496 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
497 if (!dev->absinfo) {
498 dev_err(dev->dev.parent ?: &dev->dev,
499 "%s: unable to allocate memory\n", __func__);
500 /*
501 * We will handle this allocation failure in
502 * input_register_device() when we refuse to register input
503 * device with ABS bits but without absinfo.
504 */
505 }
506}
507EXPORT_SYMBOL(input_alloc_absinfo);
508
509void input_set_abs_params(struct input_dev *dev, unsigned int axis,
510 int min, int max, int fuzz, int flat)
511{
512 struct input_absinfo *absinfo;
513
514 input_alloc_absinfo(dev);
515 if (!dev->absinfo)
516 return;
517
518 absinfo = &dev->absinfo[axis];
519 absinfo->minimum = min;
520 absinfo->maximum = max;
521 absinfo->fuzz = fuzz;
522 absinfo->flat = flat;
523
524 __set_bit(EV_ABS, dev->evbit);
525 __set_bit(axis, dev->absbit);
526}
527EXPORT_SYMBOL(input_set_abs_params);
528
529
530/**
531 * input_grab_device - grabs device for exclusive use
532 * @handle: input handle that wants to own the device
533 *
534 * When a device is grabbed by an input handle all events generated by
535 * the device are delivered only to this handle. Also events injected
536 * by other input handles are ignored while device is grabbed.
537 */
538int input_grab_device(struct input_handle *handle)
539{
540 struct input_dev *dev = handle->dev;
541 int retval;
542
543 retval = mutex_lock_interruptible(&dev->mutex);
544 if (retval)
545 return retval;
546
547 if (dev->grab) {
548 retval = -EBUSY;
549 goto out;
550 }
551
552 rcu_assign_pointer(dev->grab, handle);
553
554 out:
555 mutex_unlock(&dev->mutex);
556 return retval;
557}
558EXPORT_SYMBOL(input_grab_device);
559
560static void __input_release_device(struct input_handle *handle)
561{
562 struct input_dev *dev = handle->dev;
563 struct input_handle *grabber;
564
565 grabber = rcu_dereference_protected(dev->grab,
566 lockdep_is_held(&dev->mutex));
567 if (grabber == handle) {
568 rcu_assign_pointer(dev->grab, NULL);
569 /* Make sure input_pass_event() notices that grab is gone */
570 synchronize_rcu();
571
572 list_for_each_entry(handle, &dev->h_list, d_node)
573 if (handle->open && handle->handler->start)
574 handle->handler->start(handle);
575 }
576}
577
578/**
579 * input_release_device - release previously grabbed device
580 * @handle: input handle that owns the device
581 *
582 * Releases previously grabbed device so that other input handles can
583 * start receiving input events. Upon release all handlers attached
584 * to the device have their start() method called so they have a change
585 * to synchronize device state with the rest of the system.
586 */
587void input_release_device(struct input_handle *handle)
588{
589 struct input_dev *dev = handle->dev;
590
591 mutex_lock(&dev->mutex);
592 __input_release_device(handle);
593 mutex_unlock(&dev->mutex);
594}
595EXPORT_SYMBOL(input_release_device);
596
597/**
598 * input_open_device - open input device
599 * @handle: handle through which device is being accessed
600 *
601 * This function should be called by input handlers when they
602 * want to start receive events from given input device.
603 */
604int input_open_device(struct input_handle *handle)
605{
606 struct input_dev *dev = handle->dev;
607 int retval;
608
609 retval = mutex_lock_interruptible(&dev->mutex);
610 if (retval)
611 return retval;
612
613 if (dev->going_away) {
614 retval = -ENODEV;
615 goto out;
616 }
617
618 handle->open++;
619
620 if (dev->users++ || dev->inhibited) {
621 /*
622 * Device is already opened and/or inhibited,
623 * so we can exit immediately and report success.
624 */
625 goto out;
626 }
627
628 if (dev->open) {
629 retval = dev->open(dev);
630 if (retval) {
631 dev->users--;
632 handle->open--;
633 /*
634 * Make sure we are not delivering any more events
635 * through this handle
636 */
637 synchronize_rcu();
638 goto out;
639 }
640 }
641
642 if (dev->poller)
643 input_dev_poller_start(dev->poller);
644
645 out:
646 mutex_unlock(&dev->mutex);
647 return retval;
648}
649EXPORT_SYMBOL(input_open_device);
650
651int input_flush_device(struct input_handle *handle, struct file *file)
652{
653 struct input_dev *dev = handle->dev;
654 int retval;
655
656 retval = mutex_lock_interruptible(&dev->mutex);
657 if (retval)
658 return retval;
659
660 if (dev->flush)
661 retval = dev->flush(dev, file);
662
663 mutex_unlock(&dev->mutex);
664 return retval;
665}
666EXPORT_SYMBOL(input_flush_device);
667
668/**
669 * input_close_device - close input device
670 * @handle: handle through which device is being accessed
671 *
672 * This function should be called by input handlers when they
673 * want to stop receive events from given input device.
674 */
675void input_close_device(struct input_handle *handle)
676{
677 struct input_dev *dev = handle->dev;
678
679 mutex_lock(&dev->mutex);
680
681 __input_release_device(handle);
682
683 if (!dev->inhibited && !--dev->users) {
684 if (dev->poller)
685 input_dev_poller_stop(dev->poller);
686 if (dev->close)
687 dev->close(dev);
688 }
689
690 if (!--handle->open) {
691 /*
692 * synchronize_rcu() makes sure that input_pass_event()
693 * completed and that no more input events are delivered
694 * through this handle
695 */
696 synchronize_rcu();
697 }
698
699 mutex_unlock(&dev->mutex);
700}
701EXPORT_SYMBOL(input_close_device);
702
703/*
704 * Simulate keyup events for all keys that are marked as pressed.
705 * The function must be called with dev->event_lock held.
706 */
707static void input_dev_release_keys(struct input_dev *dev)
708{
709 bool need_sync = false;
710 int code;
711
712 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
713 for_each_set_bit(code, dev->key, KEY_CNT) {
714 input_pass_event(dev, EV_KEY, code, 0);
715 need_sync = true;
716 }
717
718 if (need_sync)
719 input_pass_event(dev, EV_SYN, SYN_REPORT, 1);
720
721 memset(dev->key, 0, sizeof(dev->key));
722 }
723}
724
725/*
726 * Prepare device for unregistering
727 */
728static void input_disconnect_device(struct input_dev *dev)
729{
730 struct input_handle *handle;
731
732 /*
733 * Mark device as going away. Note that we take dev->mutex here
734 * not to protect access to dev->going_away but rather to ensure
735 * that there are no threads in the middle of input_open_device()
736 */
737 mutex_lock(&dev->mutex);
738 dev->going_away = true;
739 mutex_unlock(&dev->mutex);
740
741 spin_lock_irq(&dev->event_lock);
742
743 /*
744 * Simulate keyup events for all pressed keys so that handlers
745 * are not left with "stuck" keys. The driver may continue
746 * generate events even after we done here but they will not
747 * reach any handlers.
748 */
749 input_dev_release_keys(dev);
750
751 list_for_each_entry(handle, &dev->h_list, d_node)
752 handle->open = 0;
753
754 spin_unlock_irq(&dev->event_lock);
755}
756
757/**
758 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
759 * @ke: keymap entry containing scancode to be converted.
760 * @scancode: pointer to the location where converted scancode should
761 * be stored.
762 *
763 * This function is used to convert scancode stored in &struct keymap_entry
764 * into scalar form understood by legacy keymap handling methods. These
765 * methods expect scancodes to be represented as 'unsigned int'.
766 */
767int input_scancode_to_scalar(const struct input_keymap_entry *ke,
768 unsigned int *scancode)
769{
770 switch (ke->len) {
771 case 1:
772 *scancode = *((u8 *)ke->scancode);
773 break;
774
775 case 2:
776 *scancode = *((u16 *)ke->scancode);
777 break;
778
779 case 4:
780 *scancode = *((u32 *)ke->scancode);
781 break;
782
783 default:
784 return -EINVAL;
785 }
786
787 return 0;
788}
789EXPORT_SYMBOL(input_scancode_to_scalar);
790
791/*
792 * Those routines handle the default case where no [gs]etkeycode() is
793 * defined. In this case, an array indexed by the scancode is used.
794 */
795
796static unsigned int input_fetch_keycode(struct input_dev *dev,
797 unsigned int index)
798{
799 switch (dev->keycodesize) {
800 case 1:
801 return ((u8 *)dev->keycode)[index];
802
803 case 2:
804 return ((u16 *)dev->keycode)[index];
805
806 default:
807 return ((u32 *)dev->keycode)[index];
808 }
809}
810
811static int input_default_getkeycode(struct input_dev *dev,
812 struct input_keymap_entry *ke)
813{
814 unsigned int index;
815 int error;
816
817 if (!dev->keycodesize)
818 return -EINVAL;
819
820 if (ke->flags & INPUT_KEYMAP_BY_INDEX)
821 index = ke->index;
822 else {
823 error = input_scancode_to_scalar(ke, &index);
824 if (error)
825 return error;
826 }
827
828 if (index >= dev->keycodemax)
829 return -EINVAL;
830
831 ke->keycode = input_fetch_keycode(dev, index);
832 ke->index = index;
833 ke->len = sizeof(index);
834 memcpy(ke->scancode, &index, sizeof(index));
835
836 return 0;
837}
838
839static int input_default_setkeycode(struct input_dev *dev,
840 const struct input_keymap_entry *ke,
841 unsigned int *old_keycode)
842{
843 unsigned int index;
844 int error;
845 int i;
846
847 if (!dev->keycodesize)
848 return -EINVAL;
849
850 if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
851 index = ke->index;
852 } else {
853 error = input_scancode_to_scalar(ke, &index);
854 if (error)
855 return error;
856 }
857
858 if (index >= dev->keycodemax)
859 return -EINVAL;
860
861 if (dev->keycodesize < sizeof(ke->keycode) &&
862 (ke->keycode >> (dev->keycodesize * 8)))
863 return -EINVAL;
864
865 switch (dev->keycodesize) {
866 case 1: {
867 u8 *k = (u8 *)dev->keycode;
868 *old_keycode = k[index];
869 k[index] = ke->keycode;
870 break;
871 }
872 case 2: {
873 u16 *k = (u16 *)dev->keycode;
874 *old_keycode = k[index];
875 k[index] = ke->keycode;
876 break;
877 }
878 default: {
879 u32 *k = (u32 *)dev->keycode;
880 *old_keycode = k[index];
881 k[index] = ke->keycode;
882 break;
883 }
884 }
885
886 if (*old_keycode <= KEY_MAX) {
887 __clear_bit(*old_keycode, dev->keybit);
888 for (i = 0; i < dev->keycodemax; i++) {
889 if (input_fetch_keycode(dev, i) == *old_keycode) {
890 __set_bit(*old_keycode, dev->keybit);
891 /* Setting the bit twice is useless, so break */
892 break;
893 }
894 }
895 }
896
897 __set_bit(ke->keycode, dev->keybit);
898 return 0;
899}
900
901/**
902 * input_get_keycode - retrieve keycode currently mapped to a given scancode
903 * @dev: input device which keymap is being queried
904 * @ke: keymap entry
905 *
906 * This function should be called by anyone interested in retrieving current
907 * keymap. Presently evdev handlers use it.
908 */
909int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
910{
911 unsigned long flags;
912 int retval;
913
914 spin_lock_irqsave(&dev->event_lock, flags);
915 retval = dev->getkeycode(dev, ke);
916 spin_unlock_irqrestore(&dev->event_lock, flags);
917
918 return retval;
919}
920EXPORT_SYMBOL(input_get_keycode);
921
922/**
923 * input_set_keycode - attribute a keycode to a given scancode
924 * @dev: input device which keymap is being updated
925 * @ke: new keymap entry
926 *
927 * This function should be called by anyone needing to update current
928 * keymap. Presently keyboard and evdev handlers use it.
929 */
930int input_set_keycode(struct input_dev *dev,
931 const struct input_keymap_entry *ke)
932{
933 unsigned long flags;
934 unsigned int old_keycode;
935 int retval;
936
937 if (ke->keycode > KEY_MAX)
938 return -EINVAL;
939
940 spin_lock_irqsave(&dev->event_lock, flags);
941
942 retval = dev->setkeycode(dev, ke, &old_keycode);
943 if (retval)
944 goto out;
945
946 /* Make sure KEY_RESERVED did not get enabled. */
947 __clear_bit(KEY_RESERVED, dev->keybit);
948
949 /*
950 * Simulate keyup event if keycode is not present
951 * in the keymap anymore
952 */
953 if (old_keycode > KEY_MAX) {
954 dev_warn(dev->dev.parent ?: &dev->dev,
955 "%s: got too big old keycode %#x\n",
956 __func__, old_keycode);
957 } else if (test_bit(EV_KEY, dev->evbit) &&
958 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
959 __test_and_clear_bit(old_keycode, dev->key)) {
960 struct input_value vals[] = {
961 { EV_KEY, old_keycode, 0 },
962 input_value_sync
963 };
964
965 input_pass_values(dev, vals, ARRAY_SIZE(vals));
966 }
967
968 out:
969 spin_unlock_irqrestore(&dev->event_lock, flags);
970
971 return retval;
972}
973EXPORT_SYMBOL(input_set_keycode);
974
975bool input_match_device_id(const struct input_dev *dev,
976 const struct input_device_id *id)
977{
978 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
979 if (id->bustype != dev->id.bustype)
980 return false;
981
982 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
983 if (id->vendor != dev->id.vendor)
984 return false;
985
986 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
987 if (id->product != dev->id.product)
988 return false;
989
990 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
991 if (id->version != dev->id.version)
992 return false;
993
994 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
995 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
996 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
997 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
998 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
999 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
1000 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
1001 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
1002 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
1003 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
1004 return false;
1005 }
1006
1007 return true;
1008}
1009EXPORT_SYMBOL(input_match_device_id);
1010
1011static const struct input_device_id *input_match_device(struct input_handler *handler,
1012 struct input_dev *dev)
1013{
1014 const struct input_device_id *id;
1015
1016 for (id = handler->id_table; id->flags || id->driver_info; id++) {
1017 if (input_match_device_id(dev, id) &&
1018 (!handler->match || handler->match(handler, dev))) {
1019 return id;
1020 }
1021 }
1022
1023 return NULL;
1024}
1025
1026static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
1027{
1028 const struct input_device_id *id;
1029 int error;
1030
1031 id = input_match_device(handler, dev);
1032 if (!id)
1033 return -ENODEV;
1034
1035 error = handler->connect(handler, dev, id);
1036 if (error && error != -ENODEV)
1037 pr_err("failed to attach handler %s to device %s, error: %d\n",
1038 handler->name, kobject_name(&dev->dev.kobj), error);
1039
1040 return error;
1041}
1042
1043#ifdef CONFIG_COMPAT
1044
1045static int input_bits_to_string(char *buf, int buf_size,
1046 unsigned long bits, bool skip_empty)
1047{
1048 int len = 0;
1049
1050 if (in_compat_syscall()) {
1051 u32 dword = bits >> 32;
1052 if (dword || !skip_empty)
1053 len += snprintf(buf, buf_size, "%x ", dword);
1054
1055 dword = bits & 0xffffffffUL;
1056 if (dword || !skip_empty || len)
1057 len += snprintf(buf + len, max(buf_size - len, 0),
1058 "%x", dword);
1059 } else {
1060 if (bits || !skip_empty)
1061 len += snprintf(buf, buf_size, "%lx", bits);
1062 }
1063
1064 return len;
1065}
1066
1067#else /* !CONFIG_COMPAT */
1068
1069static int input_bits_to_string(char *buf, int buf_size,
1070 unsigned long bits, bool skip_empty)
1071{
1072 return bits || !skip_empty ?
1073 snprintf(buf, buf_size, "%lx", bits) : 0;
1074}
1075
1076#endif
1077
1078#ifdef CONFIG_PROC_FS
1079
1080static struct proc_dir_entry *proc_bus_input_dir;
1081static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
1082static int input_devices_state;
1083
1084static inline void input_wakeup_procfs_readers(void)
1085{
1086 input_devices_state++;
1087 wake_up(&input_devices_poll_wait);
1088}
1089
1090static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
1091{
1092 poll_wait(file, &input_devices_poll_wait, wait);
1093 if (file->f_version != input_devices_state) {
1094 file->f_version = input_devices_state;
1095 return EPOLLIN | EPOLLRDNORM;
1096 }
1097
1098 return 0;
1099}
1100
1101union input_seq_state {
1102 struct {
1103 unsigned short pos;
1104 bool mutex_acquired;
1105 };
1106 void *p;
1107};
1108
1109static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
1110{
1111 union input_seq_state *state = (union input_seq_state *)&seq->private;
1112 int error;
1113
1114 /* We need to fit into seq->private pointer */
1115 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1116
1117 error = mutex_lock_interruptible(&input_mutex);
1118 if (error) {
1119 state->mutex_acquired = false;
1120 return ERR_PTR(error);
1121 }
1122
1123 state->mutex_acquired = true;
1124
1125 return seq_list_start(&input_dev_list, *pos);
1126}
1127
1128static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1129{
1130 return seq_list_next(v, &input_dev_list, pos);
1131}
1132
1133static void input_seq_stop(struct seq_file *seq, void *v)
1134{
1135 union input_seq_state *state = (union input_seq_state *)&seq->private;
1136
1137 if (state->mutex_acquired)
1138 mutex_unlock(&input_mutex);
1139}
1140
1141static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
1142 unsigned long *bitmap, int max)
1143{
1144 int i;
1145 bool skip_empty = true;
1146 char buf[18];
1147
1148 seq_printf(seq, "B: %s=", name);
1149
1150 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1151 if (input_bits_to_string(buf, sizeof(buf),
1152 bitmap[i], skip_empty)) {
1153 skip_empty = false;
1154 seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
1155 }
1156 }
1157
1158 /*
1159 * If no output was produced print a single 0.
1160 */
1161 if (skip_empty)
1162 seq_putc(seq, '0');
1163
1164 seq_putc(seq, '\n');
1165}
1166
1167static int input_devices_seq_show(struct seq_file *seq, void *v)
1168{
1169 struct input_dev *dev = container_of(v, struct input_dev, node);
1170 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
1171 struct input_handle *handle;
1172
1173 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
1174 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);
1175
1176 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
1177 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
1178 seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
1179 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
1180 seq_puts(seq, "H: Handlers=");
1181
1182 list_for_each_entry(handle, &dev->h_list, d_node)
1183 seq_printf(seq, "%s ", handle->name);
1184 seq_putc(seq, '\n');
1185
1186 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);
1187
1188 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
1189 if (test_bit(EV_KEY, dev->evbit))
1190 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
1191 if (test_bit(EV_REL, dev->evbit))
1192 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
1193 if (test_bit(EV_ABS, dev->evbit))
1194 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
1195 if (test_bit(EV_MSC, dev->evbit))
1196 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
1197 if (test_bit(EV_LED, dev->evbit))
1198 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
1199 if (test_bit(EV_SND, dev->evbit))
1200 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
1201 if (test_bit(EV_FF, dev->evbit))
1202 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
1203 if (test_bit(EV_SW, dev->evbit))
1204 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);
1205
1206 seq_putc(seq, '\n');
1207
1208 kfree(path);
1209 return 0;
1210}
1211
1212static const struct seq_operations input_devices_seq_ops = {
1213 .start = input_devices_seq_start,
1214 .next = input_devices_seq_next,
1215 .stop = input_seq_stop,
1216 .show = input_devices_seq_show,
1217};
1218
1219static int input_proc_devices_open(struct inode *inode, struct file *file)
1220{
1221 return seq_open(file, &input_devices_seq_ops);
1222}
1223
1224static const struct proc_ops input_devices_proc_ops = {
1225 .proc_open = input_proc_devices_open,
1226 .proc_poll = input_proc_devices_poll,
1227 .proc_read = seq_read,
1228 .proc_lseek = seq_lseek,
1229 .proc_release = seq_release,
1230};
1231
1232static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
1233{
1234 union input_seq_state *state = (union input_seq_state *)&seq->private;
1235 int error;
1236
1237 /* We need to fit into seq->private pointer */
1238 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));
1239
1240 error = mutex_lock_interruptible(&input_mutex);
1241 if (error) {
1242 state->mutex_acquired = false;
1243 return ERR_PTR(error);
1244 }
1245
1246 state->mutex_acquired = true;
1247 state->pos = *pos;
1248
1249 return seq_list_start(&input_handler_list, *pos);
1250}
1251
1252static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1253{
1254 union input_seq_state *state = (union input_seq_state *)&seq->private;
1255
1256 state->pos = *pos + 1;
1257 return seq_list_next(v, &input_handler_list, pos);
1258}
1259
1260static int input_handlers_seq_show(struct seq_file *seq, void *v)
1261{
1262 struct input_handler *handler = container_of(v, struct input_handler, node);
1263 union input_seq_state *state = (union input_seq_state *)&seq->private;
1264
1265 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
1266 if (handler->filter)
1267 seq_puts(seq, " (filter)");
1268 if (handler->legacy_minors)
1269 seq_printf(seq, " Minor=%d", handler->minor);
1270 seq_putc(seq, '\n');
1271
1272 return 0;
1273}
1274
1275static const struct seq_operations input_handlers_seq_ops = {
1276 .start = input_handlers_seq_start,
1277 .next = input_handlers_seq_next,
1278 .stop = input_seq_stop,
1279 .show = input_handlers_seq_show,
1280};
1281
1282static int input_proc_handlers_open(struct inode *inode, struct file *file)
1283{
1284 return seq_open(file, &input_handlers_seq_ops);
1285}
1286
1287static const struct proc_ops input_handlers_proc_ops = {
1288 .proc_open = input_proc_handlers_open,
1289 .proc_read = seq_read,
1290 .proc_lseek = seq_lseek,
1291 .proc_release = seq_release,
1292};
1293
1294static int __init input_proc_init(void)
1295{
1296 struct proc_dir_entry *entry;
1297
1298 proc_bus_input_dir = proc_mkdir("bus/input", NULL);
1299 if (!proc_bus_input_dir)
1300 return -ENOMEM;
1301
1302 entry = proc_create("devices", 0, proc_bus_input_dir,
1303 &input_devices_proc_ops);
1304 if (!entry)
1305 goto fail1;
1306
1307 entry = proc_create("handlers", 0, proc_bus_input_dir,
1308 &input_handlers_proc_ops);
1309 if (!entry)
1310 goto fail2;
1311
1312 return 0;
1313
1314 fail2: remove_proc_entry("devices", proc_bus_input_dir);
1315 fail1: remove_proc_entry("bus/input", NULL);
1316 return -ENOMEM;
1317}
1318
1319static void input_proc_exit(void)
1320{
1321 remove_proc_entry("devices", proc_bus_input_dir);
1322 remove_proc_entry("handlers", proc_bus_input_dir);
1323 remove_proc_entry("bus/input", NULL);
1324}
1325
1326#else /* !CONFIG_PROC_FS */
1327static inline void input_wakeup_procfs_readers(void) { }
1328static inline int input_proc_init(void) { return 0; }
1329static inline void input_proc_exit(void) { }
1330#endif
1331
1332#define INPUT_DEV_STRING_ATTR_SHOW(name) \
1333static ssize_t input_dev_show_##name(struct device *dev, \
1334 struct device_attribute *attr, \
1335 char *buf) \
1336{ \
1337 struct input_dev *input_dev = to_input_dev(dev); \
1338 \
1339 return scnprintf(buf, PAGE_SIZE, "%s\n", \
1340 input_dev->name ? input_dev->name : ""); \
1341} \
1342static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)
1343
1344INPUT_DEV_STRING_ATTR_SHOW(name);
1345INPUT_DEV_STRING_ATTR_SHOW(phys);
1346INPUT_DEV_STRING_ATTR_SHOW(uniq);
1347
1348static int input_print_modalias_bits(char *buf, int size,
1349 char name, unsigned long *bm,
1350 unsigned int min_bit, unsigned int max_bit)
1351{
1352 int len = 0, i;
1353
1354 len += snprintf(buf, max(size, 0), "%c", name);
1355 for (i = min_bit; i < max_bit; i++)
1356 if (bm[BIT_WORD(i)] & BIT_MASK(i))
1357 len += snprintf(buf + len, max(size - len, 0), "%X,", i);
1358 return len;
1359}
1360
1361static int input_print_modalias(char *buf, int size, struct input_dev *id,
1362 int add_cr)
1363{
1364 int len;
1365
1366 len = snprintf(buf, max(size, 0),
1367 "input:b%04Xv%04Xp%04Xe%04X-",
1368 id->id.bustype, id->id.vendor,
1369 id->id.product, id->id.version);
1370
1371 len += input_print_modalias_bits(buf + len, size - len,
1372 'e', id->evbit, 0, EV_MAX);
1373 len += input_print_modalias_bits(buf + len, size - len,
1374 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
1375 len += input_print_modalias_bits(buf + len, size - len,
1376 'r', id->relbit, 0, REL_MAX);
1377 len += input_print_modalias_bits(buf + len, size - len,
1378 'a', id->absbit, 0, ABS_MAX);
1379 len += input_print_modalias_bits(buf + len, size - len,
1380 'm', id->mscbit, 0, MSC_MAX);
1381 len += input_print_modalias_bits(buf + len, size - len,
1382 'l', id->ledbit, 0, LED_MAX);
1383 len += input_print_modalias_bits(buf + len, size - len,
1384 's', id->sndbit, 0, SND_MAX);
1385 len += input_print_modalias_bits(buf + len, size - len,
1386 'f', id->ffbit, 0, FF_MAX);
1387 len += input_print_modalias_bits(buf + len, size - len,
1388 'w', id->swbit, 0, SW_MAX);
1389
1390 if (add_cr)
1391 len += snprintf(buf + len, max(size - len, 0), "\n");
1392
1393 return len;
1394}
1395
1396static ssize_t input_dev_show_modalias(struct device *dev,
1397 struct device_attribute *attr,
1398 char *buf)
1399{
1400 struct input_dev *id = to_input_dev(dev);
1401 ssize_t len;
1402
1403 len = input_print_modalias(buf, PAGE_SIZE, id, 1);
1404
1405 return min_t(int, len, PAGE_SIZE);
1406}
1407static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);
1408
1409static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1410 int max, int add_cr);
1411
1412static ssize_t input_dev_show_properties(struct device *dev,
1413 struct device_attribute *attr,
1414 char *buf)
1415{
1416 struct input_dev *input_dev = to_input_dev(dev);
1417 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
1418 INPUT_PROP_MAX, true);
1419 return min_t(int, len, PAGE_SIZE);
1420}
1421static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);
1422
1423static int input_inhibit_device(struct input_dev *dev);
1424static int input_uninhibit_device(struct input_dev *dev);
1425
1426static ssize_t inhibited_show(struct device *dev,
1427 struct device_attribute *attr,
1428 char *buf)
1429{
1430 struct input_dev *input_dev = to_input_dev(dev);
1431
1432 return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
1433}
1434
1435static ssize_t inhibited_store(struct device *dev,
1436 struct device_attribute *attr, const char *buf,
1437 size_t len)
1438{
1439 struct input_dev *input_dev = to_input_dev(dev);
1440 ssize_t rv;
1441 bool inhibited;
1442
	if (kstrtobool(buf, &inhibited))
1444 return -EINVAL;
1445
1446 if (inhibited)
1447 rv = input_inhibit_device(input_dev);
1448 else
1449 rv = input_uninhibit_device(input_dev);
1450
1451 if (rv != 0)
1452 return rv;
1453
1454 return len;
1455}
1456
1457static DEVICE_ATTR_RW(inhibited);
1458
1459static struct attribute *input_dev_attrs[] = {
1460 &dev_attr_name.attr,
1461 &dev_attr_phys.attr,
1462 &dev_attr_uniq.attr,
1463 &dev_attr_modalias.attr,
1464 &dev_attr_properties.attr,
1465 &dev_attr_inhibited.attr,
1466 NULL
1467};
1468
1469static const struct attribute_group input_dev_attr_group = {
1470 .attrs = input_dev_attrs,
1471};
1472
1473#define INPUT_DEV_ID_ATTR(name) \
1474static ssize_t input_dev_show_id_##name(struct device *dev, \
1475 struct device_attribute *attr, \
1476 char *buf) \
1477{ \
1478 struct input_dev *input_dev = to_input_dev(dev); \
1479 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \
1480} \
1481static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)
1482
1483INPUT_DEV_ID_ATTR(bustype);
1484INPUT_DEV_ID_ATTR(vendor);
1485INPUT_DEV_ID_ATTR(product);
1486INPUT_DEV_ID_ATTR(version);
1487
1488static struct attribute *input_dev_id_attrs[] = {
1489 &dev_attr_bustype.attr,
1490 &dev_attr_vendor.attr,
1491 &dev_attr_product.attr,
1492 &dev_attr_version.attr,
1493 NULL
1494};
1495
1496static const struct attribute_group input_dev_id_attr_group = {
1497 .name = "id",
1498 .attrs = input_dev_id_attrs,
1499};
1500
1501static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
1502 int max, int add_cr)
1503{
1504 int i;
1505 int len = 0;
1506 bool skip_empty = true;
1507
1508 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
1509 len += input_bits_to_string(buf + len, max(buf_size - len, 0),
1510 bitmap[i], skip_empty);
1511 if (len) {
1512 skip_empty = false;
1513 if (i > 0)
1514 len += snprintf(buf + len, max(buf_size - len, 0), " ");
1515 }
1516 }
1517
1518 /*
1519 * If no output was produced print a single 0.
1520 */
1521 if (len == 0)
1522 len = snprintf(buf, buf_size, "%d", 0);
1523
1524 if (add_cr)
1525 len += snprintf(buf + len, max(buf_size - len, 0), "\n");
1526
1527 return len;
1528}
1529
1530#define INPUT_DEV_CAP_ATTR(ev, bm) \
1531static ssize_t input_dev_show_cap_##bm(struct device *dev, \
1532 struct device_attribute *attr, \
1533 char *buf) \
1534{ \
1535 struct input_dev *input_dev = to_input_dev(dev); \
1536 int len = input_print_bitmap(buf, PAGE_SIZE, \
1537 input_dev->bm##bit, ev##_MAX, \
1538 true); \
1539 return min_t(int, len, PAGE_SIZE); \
1540} \
1541static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)
1542
1543INPUT_DEV_CAP_ATTR(EV, ev);
1544INPUT_DEV_CAP_ATTR(KEY, key);
1545INPUT_DEV_CAP_ATTR(REL, rel);
1546INPUT_DEV_CAP_ATTR(ABS, abs);
1547INPUT_DEV_CAP_ATTR(MSC, msc);
1548INPUT_DEV_CAP_ATTR(LED, led);
1549INPUT_DEV_CAP_ATTR(SND, snd);
1550INPUT_DEV_CAP_ATTR(FF, ff);
1551INPUT_DEV_CAP_ATTR(SW, sw);
1552
1553static struct attribute *input_dev_caps_attrs[] = {
1554 &dev_attr_ev.attr,
1555 &dev_attr_key.attr,
1556 &dev_attr_rel.attr,
1557 &dev_attr_abs.attr,
1558 &dev_attr_msc.attr,
1559 &dev_attr_led.attr,
1560 &dev_attr_snd.attr,
1561 &dev_attr_ff.attr,
1562 &dev_attr_sw.attr,
1563 NULL
1564};
1565
1566static const struct attribute_group input_dev_caps_attr_group = {
1567 .name = "capabilities",
1568 .attrs = input_dev_caps_attrs,
1569};
1570
1571static const struct attribute_group *input_dev_attr_groups[] = {
1572 &input_dev_attr_group,
1573 &input_dev_id_attr_group,
1574 &input_dev_caps_attr_group,
1575 &input_poller_attribute_group,
1576 NULL
1577};
1578
1579static void input_dev_release(struct device *device)
1580{
1581 struct input_dev *dev = to_input_dev(device);
1582
1583 input_ff_destroy(dev);
1584 input_mt_destroy_slots(dev);
1585 kfree(dev->poller);
1586 kfree(dev->absinfo);
1587 kfree(dev->vals);
1588 kfree(dev);
1589
1590 module_put(THIS_MODULE);
1591}
1592
1593/*
1594 * Input uevent interface - loading event handlers based on
1595 * device bitfields.
1596 */
1597static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
1598 const char *name, unsigned long *bitmap, int max)
1599{
1600 int len;
1601
1602 if (add_uevent_var(env, "%s", name))
1603 return -ENOMEM;
1604
1605 len = input_print_bitmap(&env->buf[env->buflen - 1],
1606 sizeof(env->buf) - env->buflen,
1607 bitmap, max, false);
1608 if (len >= (sizeof(env->buf) - env->buflen))
1609 return -ENOMEM;
1610
1611 env->buflen += len;
1612 return 0;
1613}
1614
1615static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
1616 struct input_dev *dev)
1617{
1618 int len;
1619
1620 if (add_uevent_var(env, "MODALIAS="))
1621 return -ENOMEM;
1622
1623 len = input_print_modalias(&env->buf[env->buflen - 1],
1624 sizeof(env->buf) - env->buflen,
1625 dev, 0);
1626 if (len >= (sizeof(env->buf) - env->buflen))
1627 return -ENOMEM;
1628
1629 env->buflen += len;
1630 return 0;
1631}
1632
1633#define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \
1634 do { \
1635 int err = add_uevent_var(env, fmt, val); \
1636 if (err) \
1637 return err; \
1638 } while (0)
1639
1640#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \
1641 do { \
1642 int err = input_add_uevent_bm_var(env, name, bm, max); \
1643 if (err) \
1644 return err; \
1645 } while (0)
1646
1647#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \
1648 do { \
1649 int err = input_add_uevent_modalias_var(env, dev); \
1650 if (err) \
1651 return err; \
1652 } while (0)
1653
1654static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1655{
1656 struct input_dev *dev = to_input_dev(device);
1657
1658 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
1659 dev->id.bustype, dev->id.vendor,
1660 dev->id.product, dev->id.version);
1661 if (dev->name)
1662 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
1663 if (dev->phys)
1664 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
1665 if (dev->uniq)
1666 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);
1667
1668 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);
1669
1670 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
1671 if (test_bit(EV_KEY, dev->evbit))
1672 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
1673 if (test_bit(EV_REL, dev->evbit))
1674 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
1675 if (test_bit(EV_ABS, dev->evbit))
1676 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
1677 if (test_bit(EV_MSC, dev->evbit))
1678 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
1679 if (test_bit(EV_LED, dev->evbit))
1680 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
1681 if (test_bit(EV_SND, dev->evbit))
1682 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
1683 if (test_bit(EV_FF, dev->evbit))
1684 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
1685 if (test_bit(EV_SW, dev->evbit))
1686 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);
1687
1688 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);
1689
1690 return 0;
1691}
1692
1693#define INPUT_DO_TOGGLE(dev, type, bits, on) \
1694 do { \
1695 int i; \
1696 bool active; \
1697 \
1698 if (!test_bit(EV_##type, dev->evbit)) \
1699 break; \
1700 \
1701 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \
1702 active = test_bit(i, dev->bits); \
1703 if (!active && !on) \
1704 continue; \
1705 \
1706 dev->event(dev, EV_##type, i, on ? active : 0); \
1707 } \
1708 } while (0)
1709
1710static void input_dev_toggle(struct input_dev *dev, bool activate)
1711{
1712 if (!dev->event)
1713 return;
1714
1715 INPUT_DO_TOGGLE(dev, LED, led, activate);
1716 INPUT_DO_TOGGLE(dev, SND, snd, activate);
1717
1718 if (activate && test_bit(EV_REP, dev->evbit)) {
1719 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
1720 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
1721 }
1722}
1723
1724/**
1725 * input_reset_device() - reset/restore the state of input device
1726 * @dev: input device whose state needs to be reset
1727 *
 * This function tries to reset the state of an opened input device and
 * bring its internal state and the state of the hardware in sync with
 * each other. We mark all keys as released, restore LED state, repeat
 * rate, etc.
1731 */
1732void input_reset_device(struct input_dev *dev)
1733{
1734 unsigned long flags;
1735
1736 mutex_lock(&dev->mutex);
1737 spin_lock_irqsave(&dev->event_lock, flags);
1738
1739 input_dev_toggle(dev, true);
1740 input_dev_release_keys(dev);
1741
1742 spin_unlock_irqrestore(&dev->event_lock, flags);
1743 mutex_unlock(&dev->mutex);
1744}
1745EXPORT_SYMBOL(input_reset_device);
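
/*
 * Example (illustrative sketch only, not used by the core): a hypothetical
 * driver calling input_reset_device() from its resume path after the
 * hardware has been reinitialized; "foo" and foo_reinit_hardware() are
 * made-up names.
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		foo_reinit_hardware(foo);
 *		input_reset_device(foo->input);
 *
 *		return 0;
 *	}
 */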
1746
1747static int input_inhibit_device(struct input_dev *dev)
1748{
1749 int ret = 0;
1750
1751 mutex_lock(&dev->mutex);
1752
1753 if (dev->inhibited)
1754 goto out;
1755
1756 if (dev->users) {
1757 if (dev->close)
1758 dev->close(dev);
1759 if (dev->poller)
1760 input_dev_poller_stop(dev->poller);
1761 }
1762
1763 spin_lock_irq(&dev->event_lock);
1764 input_dev_release_keys(dev);
1765 input_dev_toggle(dev, false);
1766 spin_unlock_irq(&dev->event_lock);
1767
1768 dev->inhibited = true;
1769
1770out:
1771 mutex_unlock(&dev->mutex);
1772 return ret;
1773}
1774
1775static int input_uninhibit_device(struct input_dev *dev)
1776{
1777 int ret = 0;
1778
1779 mutex_lock(&dev->mutex);
1780
1781 if (!dev->inhibited)
1782 goto out;
1783
1784 if (dev->users) {
1785 if (dev->open) {
1786 ret = dev->open(dev);
1787 if (ret)
1788 goto out;
1789 }
1790 if (dev->poller)
1791 input_dev_poller_start(dev->poller);
1792 }
1793
1794 dev->inhibited = false;
1795 spin_lock_irq(&dev->event_lock);
1796 input_dev_toggle(dev, true);
1797 spin_unlock_irq(&dev->event_lock);
1798
1799out:
1800 mutex_unlock(&dev->mutex);
1801 return ret;
1802}
1803
1804#ifdef CONFIG_PM_SLEEP
1805static int input_dev_suspend(struct device *dev)
1806{
1807 struct input_dev *input_dev = to_input_dev(dev);
1808
1809 spin_lock_irq(&input_dev->event_lock);
1810
1811 /*
1812 * Keys that are pressed now are unlikely to be
1813 * still pressed when we resume.
1814 */
1815 input_dev_release_keys(input_dev);
1816
1817 /* Turn off LEDs and sounds, if any are active. */
1818 input_dev_toggle(input_dev, false);
1819
1820 spin_unlock_irq(&input_dev->event_lock);
1821
1822 return 0;
1823}
1824
1825static int input_dev_resume(struct device *dev)
1826{
1827 struct input_dev *input_dev = to_input_dev(dev);
1828
1829 spin_lock_irq(&input_dev->event_lock);
1830
1831 /* Restore state of LEDs and sounds, if any were active. */
1832 input_dev_toggle(input_dev, true);
1833
1834 spin_unlock_irq(&input_dev->event_lock);
1835
1836 return 0;
1837}
1838
1839static int input_dev_freeze(struct device *dev)
1840{
1841 struct input_dev *input_dev = to_input_dev(dev);
1842
1843 spin_lock_irq(&input_dev->event_lock);
1844
1845 /*
1846 * Keys that are pressed now are unlikely to be
1847 * still pressed when we resume.
1848 */
1849 input_dev_release_keys(input_dev);
1850
1851 spin_unlock_irq(&input_dev->event_lock);
1852
1853 return 0;
1854}
1855
1856static int input_dev_poweroff(struct device *dev)
1857{
1858 struct input_dev *input_dev = to_input_dev(dev);
1859
1860 spin_lock_irq(&input_dev->event_lock);
1861
1862 /* Turn off LEDs and sounds, if any are active. */
1863 input_dev_toggle(input_dev, false);
1864
1865 spin_unlock_irq(&input_dev->event_lock);
1866
1867 return 0;
1868}
1869
1870static const struct dev_pm_ops input_dev_pm_ops = {
1871 .suspend = input_dev_suspend,
1872 .resume = input_dev_resume,
1873 .freeze = input_dev_freeze,
1874 .poweroff = input_dev_poweroff,
1875 .restore = input_dev_resume,
1876};
#endif /* CONFIG_PM_SLEEP */
1878
1879static const struct device_type input_dev_type = {
1880 .groups = input_dev_attr_groups,
1881 .release = input_dev_release,
1882 .uevent = input_dev_uevent,
1883#ifdef CONFIG_PM_SLEEP
1884 .pm = &input_dev_pm_ops,
1885#endif
1886};
1887
1888static char *input_devnode(struct device *dev, umode_t *mode)
1889{
1890 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
1891}
1892
1893struct class input_class = {
1894 .name = "input",
1895 .devnode = input_devnode,
1896};
1897EXPORT_SYMBOL_GPL(input_class);
1898
1899/**
1900 * input_allocate_device - allocate memory for new input device
1901 *
1902 * Returns prepared struct input_dev or %NULL.
1903 *
1904 * NOTE: Use input_free_device() to free devices that have not been
1905 * registered; input_unregister_device() should be used for already
1906 * registered devices.
1907 */
1908struct input_dev *input_allocate_device(void)
1909{
1910 static atomic_t input_no = ATOMIC_INIT(-1);
1911 struct input_dev *dev;
1912
1913 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1914 if (dev) {
1915 dev->dev.type = &input_dev_type;
1916 dev->dev.class = &input_class;
1917 device_initialize(&dev->dev);
1918 mutex_init(&dev->mutex);
1919 spin_lock_init(&dev->event_lock);
1920 timer_setup(&dev->timer, NULL, 0);
1921 INIT_LIST_HEAD(&dev->h_list);
1922 INIT_LIST_HEAD(&dev->node);
1923
1924 dev_set_name(&dev->dev, "input%lu",
1925 (unsigned long)atomic_inc_return(&input_no));
1926
1927 __module_get(THIS_MODULE);
1928 }
1929
1930 return dev;
1931}
1932EXPORT_SYMBOL(input_allocate_device);
1933
1934struct input_devres {
1935 struct input_dev *input;
1936};
1937
1938static int devm_input_device_match(struct device *dev, void *res, void *data)
1939{
1940 struct input_devres *devres = res;
1941
1942 return devres->input == data;
1943}
1944
1945static void devm_input_device_release(struct device *dev, void *res)
1946{
1947 struct input_devres *devres = res;
1948 struct input_dev *input = devres->input;
1949
1950 dev_dbg(dev, "%s: dropping reference to %s\n",
1951 __func__, dev_name(&input->dev));
1952 input_put_device(input);
1953}
1954
1955/**
1956 * devm_input_allocate_device - allocate managed input device
1957 * @dev: device owning the input device being created
1958 *
1959 * Returns prepared struct input_dev or %NULL.
1960 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed as that will be done automatically when the owner device unbinds
 * from its driver (or binding fails). Once a managed input device is
 * allocated, it is ready to be set up and registered in the same fashion
 * as a regular input device. There are no special
 * devm_input_device_[un]register() variants; the regular ones work with
 * both managed and unmanaged devices, should you need them. In most cases,
 * however, a managed input device need not be explicitly unregistered or
 * freed.
1969 *
1970 * NOTE: the owner device is set up as parent of input device and users
1971 * should not override it.
1972 */
1973struct input_dev *devm_input_allocate_device(struct device *dev)
1974{
1975 struct input_dev *input;
1976 struct input_devres *devres;
1977
1978 devres = devres_alloc(devm_input_device_release,
1979 sizeof(*devres), GFP_KERNEL);
1980 if (!devres)
1981 return NULL;
1982
1983 input = input_allocate_device();
1984 if (!input) {
1985 devres_free(devres);
1986 return NULL;
1987 }
1988
1989 input->dev.parent = dev;
1990 input->devres_managed = true;
1991
1992 devres->input = input;
1993 devres_add(dev, devres);
1994
1995 return input;
1996}
1997EXPORT_SYMBOL(devm_input_allocate_device);
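
/*
 * Example (illustrative sketch only): a hypothetical platform driver
 * allocating a managed input device in its probe() routine.  Everything
 * named "foo" is made up; the devres machinery tears the device down
 * when the owner driver unbinds.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "Foo Button";
 *		input->id.bustype = BUS_HOST;
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		return input_register_device(input);
 *	}
 */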
1998
1999/**
2000 * input_free_device - free memory occupied by input_dev structure
2001 * @dev: input device to free
2002 *
2003 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once a device has been registered,
 * use input_unregister_device() and the memory will be freed once the
 * last reference to the device is dropped.
2007 *
2008 * Device should be allocated by input_allocate_device().
2009 *
2010 * NOTE: If there are references to the input device then memory
 * will not be freed until the last reference is dropped.
2012 */
2013void input_free_device(struct input_dev *dev)
2014{
2015 if (dev) {
2016 if (dev->devres_managed)
2017 WARN_ON(devres_destroy(dev->dev.parent,
2018 devm_input_device_release,
2019 devm_input_device_match,
2020 dev));
2021 input_put_device(dev);
2022 }
2023}
2024EXPORT_SYMBOL(input_free_device);
2025
2026/**
2027 * input_set_timestamp - set timestamp for input events
2028 * @dev: input device to set timestamp for
2029 * @timestamp: the time at which the event has occurred
2030 * in CLOCK_MONOTONIC
2031 *
 * This function is intended to provide the input system with a more
 * accurate time at which an event actually occurred. The driver should
 * call this function as soon as a timestamp is acquired, ensuring that
 * clock conversions in input_set_timestamp() are done correctly.
2036 *
2037 * The system entering suspend state between timestamp acquisition and
2038 * calling input_set_timestamp can result in inaccurate conversions.
2039 */
2040void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
2041{
2042 dev->timestamp[INPUT_CLK_MONO] = timestamp;
2043 dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
2044 dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
2045 TK_OFFS_BOOT);
2046}
2047EXPORT_SYMBOL(input_set_timestamp);
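
/*
 * Example (illustrative sketch only): a hypothetical interrupt handler
 * latching the time of the hardware event before reporting it, so that
 * handlers see the acquisition time rather than the time input_event()
 * ran; "foo" and foo_read_state() are made up.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo *foo = data;
 *
 *		input_set_timestamp(foo->input, ktime_get());
 *		input_report_key(foo->input, KEY_POWER, foo_read_state(foo));
 *		input_sync(foo->input);
 *
 *		return IRQ_HANDLED;
 *	}
 */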
2048
2049/**
2050 * input_get_timestamp - get timestamp for input events
2051 * @dev: input device to get timestamp from
2052 *
2053 * A valid timestamp is a timestamp of non-zero value.
2054 */
2055ktime_t *input_get_timestamp(struct input_dev *dev)
2056{
2057 const ktime_t invalid_timestamp = ktime_set(0, 0);
2058
2059 if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
2060 input_set_timestamp(dev, ktime_get());
2061
2062 return dev->timestamp;
2063}
2064EXPORT_SYMBOL(input_get_timestamp);
2065
2066/**
2067 * input_set_capability - mark device as capable of a certain event
2068 * @dev: device that is capable of emitting or accepting event
2069 * @type: type of the event (EV_KEY, EV_REL, etc...)
2070 * @code: event code
2071 *
 * In addition to setting the corresponding bit in the appropriate capability
 * bitmap, the function also adjusts dev->evbit.
2074 */
2075void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
2076{
2077 switch (type) {
2078 case EV_KEY:
2079 __set_bit(code, dev->keybit);
2080 break;
2081
2082 case EV_REL:
2083 __set_bit(code, dev->relbit);
2084 break;
2085
2086 case EV_ABS:
2087 input_alloc_absinfo(dev);
2088 if (!dev->absinfo)
2089 return;
2090
2091 __set_bit(code, dev->absbit);
2092 break;
2093
2094 case EV_MSC:
2095 __set_bit(code, dev->mscbit);
2096 break;
2097
2098 case EV_SW:
2099 __set_bit(code, dev->swbit);
2100 break;
2101
2102 case EV_LED:
2103 __set_bit(code, dev->ledbit);
2104 break;
2105
2106 case EV_SND:
2107 __set_bit(code, dev->sndbit);
2108 break;
2109
2110 case EV_FF:
2111 __set_bit(code, dev->ffbit);
2112 break;
2113
2114 case EV_PWR:
2115 /* do nothing */
2116 break;
2117
2118 default:
2119 pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
2120 dump_stack();
2121 return;
2122 }
2123
2124 __set_bit(type, dev->evbit);
2125}
2126EXPORT_SYMBOL(input_set_capability);
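
/*
 * Example (illustrative sketch only): declaring capabilities for a made-up
 * device reporting a power button and a lid switch.  For EV_ABS axes,
 * drivers normally use input_set_abs_params() instead, since it also
 * fills in the axis limits.
 *
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *	input_set_capability(input, EV_SW, SW_LID);
 */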
2127
2128static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
2129{
2130 int mt_slots;
2131 int i;
2132 unsigned int events;
2133
2134 if (dev->mt) {
2135 mt_slots = dev->mt->num_slots;
2136 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
2137 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
2139 mt_slots = clamp(mt_slots, 2, 32);
2140 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
2141 mt_slots = 2;
2142 } else {
2143 mt_slots = 0;
2144 }
2145
2146 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
2147
2148 if (test_bit(EV_ABS, dev->evbit))
2149 for_each_set_bit(i, dev->absbit, ABS_CNT)
2150 events += input_is_mt_axis(i) ? mt_slots : 1;
2151
2152 if (test_bit(EV_REL, dev->evbit))
2153 events += bitmap_weight(dev->relbit, REL_CNT);
2154
2155 /* Make room for KEY and MSC events */
2156 events += 7;
2157
2158 return events;
2159}
2160
2161#define INPUT_CLEANSE_BITMASK(dev, type, bits) \
2162 do { \
2163 if (!test_bit(EV_##type, dev->evbit)) \
2164 memset(dev->bits##bit, 0, \
2165 sizeof(dev->bits##bit)); \
2166 } while (0)
2167
2168static void input_cleanse_bitmasks(struct input_dev *dev)
2169{
2170 INPUT_CLEANSE_BITMASK(dev, KEY, key);
2171 INPUT_CLEANSE_BITMASK(dev, REL, rel);
2172 INPUT_CLEANSE_BITMASK(dev, ABS, abs);
2173 INPUT_CLEANSE_BITMASK(dev, MSC, msc);
2174 INPUT_CLEANSE_BITMASK(dev, LED, led);
2175 INPUT_CLEANSE_BITMASK(dev, SND, snd);
2176 INPUT_CLEANSE_BITMASK(dev, FF, ff);
2177 INPUT_CLEANSE_BITMASK(dev, SW, sw);
2178}
2179
2180static void __input_unregister_device(struct input_dev *dev)
2181{
2182 struct input_handle *handle, *next;
2183
2184 input_disconnect_device(dev);
2185
2186 mutex_lock(&input_mutex);
2187
2188 list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
2189 handle->handler->disconnect(handle);
2190 WARN_ON(!list_empty(&dev->h_list));
2191
2192 del_timer_sync(&dev->timer);
2193 list_del_init(&dev->node);
2194
2195 input_wakeup_procfs_readers();
2196
2197 mutex_unlock(&input_mutex);
2198
2199 device_del(&dev->dev);
2200}
2201
2202static void devm_input_device_unregister(struct device *dev, void *res)
2203{
2204 struct input_devres *devres = res;
2205 struct input_dev *input = devres->input;
2206
2207 dev_dbg(dev, "%s: unregistering device %s\n",
2208 __func__, dev_name(&input->dev));
2209 __input_unregister_device(input);
2210}
2211
2212/**
2213 * input_enable_softrepeat - enable software autorepeat
2214 * @dev: input device
2215 * @delay: repeat delay
2216 * @period: repeat period
2217 *
2218 * Enable software autorepeat on the input device.
2219 */
2220void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
2221{
2222 dev->timer.function = input_repeat_key;
2223 dev->rep[REP_DELAY] = delay;
2224 dev->rep[REP_PERIOD] = period;
2225}
2226EXPORT_SYMBOL(input_enable_softrepeat);
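
/*
 * Example (illustrative sketch only): a hypothetical keyboard driver that
 * wants software autorepeat, but with parameters different from the
 * 250 ms / 33 ms defaults chosen by input_register_device().  Setting
 * EV_REP and non-zero repeat values before registration achieves that.
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 500, 50);
 */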
2227
2228bool input_device_enabled(struct input_dev *dev)
2229{
2230 lockdep_assert_held(&dev->mutex);
2231
2232 return !dev->inhibited && dev->users > 0;
2233}
2234EXPORT_SYMBOL_GPL(input_device_enabled);
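
/*
 * Example (illustrative sketch only): a hypothetical driver's suspend hook
 * using input_device_enabled() to decide whether the hardware is in active
 * use; dev->mutex must be held, hence the explicit locking.  "foo" and
 * foo_stop_scanning() are made up.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		mutex_lock(&foo->input->mutex);
 *		if (input_device_enabled(foo->input))
 *			foo_stop_scanning(foo);
 *		mutex_unlock(&foo->input->mutex);
 *
 *		return 0;
 *	}
 */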
2235
2236/**
2237 * input_register_device - register device with input core
2238 * @dev: device to be registered
2239 *
 * This function registers the device with the input core. The device
 * must be allocated with input_allocate_device() and all of its
 * capabilities set up before registering.
 * If the function fails, the device must be freed with input_free_device().
2244 * Once device has been successfully registered it can be unregistered
2245 * with input_unregister_device(); input_free_device() should not be
2246 * called in this case.
2247 *
2248 * Note that this function is also used to register managed input devices
2249 * (ones allocated with devm_input_allocate_device()). Such managed input
2250 * devices need not be explicitly unregistered or freed, their tear down
2251 * is controlled by the devres infrastructure. It is also worth noting
2252 * that tear down of managed input devices is internally a 2-step process:
2253 * registered managed input device is first unregistered, but stays in
2254 * memory and can still handle input_event() calls (although events will
2255 * not be delivered anywhere). The freeing of managed input device will
2256 * happen later, when devres stack is unwound to the point where device
2257 * allocation was made.
2258 */
2259int input_register_device(struct input_dev *dev)
2260{
2261 struct input_devres *devres = NULL;
2262 struct input_handler *handler;
2263 unsigned int packet_size;
2264 const char *path;
2265 int error;
2266
2267 if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
2268 dev_err(&dev->dev,
2269 "Absolute device without dev->absinfo, refusing to register\n");
2270 return -EINVAL;
2271 }
2272
2273 if (dev->devres_managed) {
2274 devres = devres_alloc(devm_input_device_unregister,
2275 sizeof(*devres), GFP_KERNEL);
2276 if (!devres)
2277 return -ENOMEM;
2278
2279 devres->input = dev;
2280 }
2281
2282 /* Every input device generates EV_SYN/SYN_REPORT events. */
2283 __set_bit(EV_SYN, dev->evbit);
2284
2285 /* KEY_RESERVED is not supposed to be transmitted to userspace. */
2286 __clear_bit(KEY_RESERVED, dev->keybit);
2287
2288 /* Make sure that bitmasks not mentioned in dev->evbit are clean. */
2289 input_cleanse_bitmasks(dev);
2290
2291 packet_size = input_estimate_events_per_packet(dev);
2292 if (dev->hint_events_per_packet < packet_size)
2293 dev->hint_events_per_packet = packet_size;
2294
2295 dev->max_vals = dev->hint_events_per_packet + 2;
2296 dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
2297 if (!dev->vals) {
2298 error = -ENOMEM;
2299 goto err_devres_free;
2300 }
2301
2302 /*
2303 * If delay and period are pre-set by the driver, then autorepeating
2304 * is handled by the driver itself and we don't do it in input.c.
2305 */
2306 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
2307 input_enable_softrepeat(dev, 250, 33);
2308
2309 if (!dev->getkeycode)
2310 dev->getkeycode = input_default_getkeycode;
2311
2312 if (!dev->setkeycode)
2313 dev->setkeycode = input_default_setkeycode;
2314
2315 if (dev->poller)
2316 input_dev_poller_finalize(dev->poller);
2317
2318 error = device_add(&dev->dev);
2319 if (error)
2320 goto err_free_vals;
2321
2322 path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
2323 pr_info("%s as %s\n",
2324 dev->name ? dev->name : "Unspecified device",
2325 path ? path : "N/A");
2326 kfree(path);
2327
2328 error = mutex_lock_interruptible(&input_mutex);
2329 if (error)
2330 goto err_device_del;
2331
2332 list_add_tail(&dev->node, &input_dev_list);
2333
2334 list_for_each_entry(handler, &input_handler_list, node)
2335 input_attach_handler(dev, handler);
2336
2337 input_wakeup_procfs_readers();
2338
2339 mutex_unlock(&input_mutex);
2340
2341 if (dev->devres_managed) {
2342 dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
2343 __func__, dev_name(&dev->dev));
2344 devres_add(dev->dev.parent, devres);
2345 }
2346 return 0;
2347
2348err_device_del:
2349 device_del(&dev->dev);
2350err_free_vals:
2351 kfree(dev->vals);
2352 dev->vals = NULL;
2353err_devres_free:
2354 devres_free(devres);
2355 return error;
2356}
2357EXPORT_SYMBOL(input_register_device);
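
/*
 * Example (illustrative sketch only): the classic non-managed sequence of
 * allocate, describe, register, and free on failure.  Names and
 * capabilities are illustrative.
 *
 *	struct input_dev *input;
 *	int error;
 *
 *	input = input_allocate_device();
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "Example Keypad";
 *	input->phys = "example/input0";
 *	input->id.bustype = BUS_I2C;
 *	input_set_capability(input, EV_KEY, KEY_ENTER);
 *
 *	error = input_register_device(input);
 *	if (error) {
 *		input_free_device(input);
 *		return error;
 *	}
 */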
2358
2359/**
2360 * input_unregister_device - unregister previously registered device
2361 * @dev: device to be unregistered
2362 *
 * This function unregisters an input device. Once the device is unregistered
 * the caller should not try to access it, as it may get freed at any moment.
2365 */
2366void input_unregister_device(struct input_dev *dev)
2367{
2368 if (dev->devres_managed) {
2369 WARN_ON(devres_destroy(dev->dev.parent,
2370 devm_input_device_unregister,
2371 devm_input_device_match,
2372 dev));
2373 __input_unregister_device(dev);
2374 /*
		 * We do not do input_put_device() here because that will be done
		 * when the second devres (the one set up at allocation time) fires.
2377 */
2378 } else {
2379 __input_unregister_device(dev);
2380 input_put_device(dev);
2381 }
2382}
2383EXPORT_SYMBOL(input_unregister_device);
2384
2385/**
2386 * input_register_handler - register a new input handler
2387 * @handler: handler to be registered
2388 *
2389 * This function registers a new input handler (interface) for input
2390 * devices in the system and attaches it to all input devices that
2391 * are compatible with the handler.
2392 */
2393int input_register_handler(struct input_handler *handler)
2394{
2395 struct input_dev *dev;
2396 int error;
2397
2398 error = mutex_lock_interruptible(&input_mutex);
2399 if (error)
2400 return error;
2401
2402 INIT_LIST_HEAD(&handler->h_list);
2403
2404 list_add_tail(&handler->node, &input_handler_list);
2405
2406 list_for_each_entry(dev, &input_dev_list, node)
2407 input_attach_handler(dev, handler);
2408
2409 input_wakeup_procfs_readers();
2410
2411 mutex_unlock(&input_mutex);
2412 return 0;
2413}
2414EXPORT_SYMBOL(input_register_handler);
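
/*
 * Example (illustrative sketch only): a made-up handler that matches every
 * device (an id table entry with no match flags matches anything; the
 * non-zero driver_info is just a marker) and plugs into the core at module
 * init time.  foo_event(), foo_connect() and foo_disconnect() are
 * hypothetical; a connect() method would follow the pattern shown after
 * input_register_handle() below.
 *
 *	static const struct input_device_id foo_ids[] = {
 *		{ .driver_info = 1 },
 *		{ },
 *	};
 *
 *	static struct input_handler foo_handler = {
 *		.event		= foo_event,
 *		.connect	= foo_connect,
 *		.disconnect	= foo_disconnect,
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return input_register_handler(&foo_handler);
 *	}
 */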
2415
2416/**
2417 * input_unregister_handler - unregisters an input handler
2418 * @handler: handler to be unregistered
2419 *
2420 * This function disconnects a handler from its input devices and
2421 * removes it from lists of known handlers.
2422 */
2423void input_unregister_handler(struct input_handler *handler)
2424{
2425 struct input_handle *handle, *next;
2426
2427 mutex_lock(&input_mutex);
2428
2429 list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
2430 handler->disconnect(handle);
2431 WARN_ON(!list_empty(&handler->h_list));
2432
2433 list_del_init(&handler->node);
2434
2435 input_wakeup_procfs_readers();
2436
2437 mutex_unlock(&input_mutex);
2438}
2439EXPORT_SYMBOL(input_unregister_handler);
2440
2441/**
2442 * input_handler_for_each_handle - handle iterator
2443 * @handler: input handler to iterate
2444 * @data: data for the callback
2445 * @fn: function to be called for each handle
2446 *
 * Iterate over @handler's list of handles, call @fn for each, passing
 * it @data, and stop when @fn returns a non-zero value. The function uses
 * RCU to traverse the list and therefore may be used in atomic contexts.
 * The @fn callback is invoked from within an RCU critical section and
 * thus must not sleep.
2452 */
2453int input_handler_for_each_handle(struct input_handler *handler, void *data,
2454 int (*fn)(struct input_handle *, void *))
2455{
2456 struct input_handle *handle;
2457 int retval = 0;
2458
2459 rcu_read_lock();
2460
2461 list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
2462 retval = fn(handle, data);
2463 if (retval)
2464 break;
2465 }
2466
2467 rcu_read_unlock();
2468
2469 return retval;
2470}
2471EXPORT_SYMBOL(input_handler_for_each_handle);
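
/*
 * Example (illustrative sketch only, following the pattern used by the VT
 * keyboard code): pushing a LED state to every handle attached to a
 * hypothetical handler.  The callback runs under RCU and must not sleep.
 *
 *	static int foo_set_capslock(struct input_handle *handle, void *data)
 *	{
 *		bool on = *(bool *)data;
 *
 *		if (test_bit(EV_LED, handle->dev->evbit))
 *			input_inject_event(handle, EV_LED, LED_CAPSL, on);
 *
 *		return 0;
 *	}
 *
 * and then, for instance from a LED update path:
 *
 *	bool on = true;
 *
 *	input_handler_for_each_handle(&foo_handler, &on, foo_set_capslock);
 */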
2472
2473/**
2474 * input_register_handle - register a new input handle
2475 * @handle: handle to register
2476 *
2477 * This function puts a new input handle onto device's
2478 * and handler's lists so that events can flow through
2479 * it once it is opened using input_open_device().
2480 *
2481 * This function is supposed to be called from handler's
2482 * connect() method.
2483 */
2484int input_register_handle(struct input_handle *handle)
2485{
2486 struct input_handler *handler = handle->handler;
2487 struct input_dev *dev = handle->dev;
2488 int error;
2489
2490 /*
2491 * We take dev->mutex here to prevent race with
2492 * input_release_device().
2493 */
2494 error = mutex_lock_interruptible(&dev->mutex);
2495 if (error)
2496 return error;
2497
2498 /*
2499 * Filters go to the head of the list, normal handlers
2500 * to the tail.
2501 */
2502 if (handler->filter)
2503 list_add_rcu(&handle->d_node, &dev->h_list);
2504 else
2505 list_add_tail_rcu(&handle->d_node, &dev->h_list);
2506
2507 mutex_unlock(&dev->mutex);
2508
2509 /*
2510 * Since we are supposed to be called from ->connect()
2511 * which is mutually exclusive with ->disconnect()
2512 * we can't be racing with input_unregister_handle()
2513 * and so separate lock is not needed here.
2514 */
2515 list_add_tail_rcu(&handle->h_node, &handler->h_list);
2516
2517 if (handler->start)
2518 handler->start(handle);
2519
2520 return 0;
2521}
2522EXPORT_SYMBOL(input_register_handle);
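
/*
 * Example (illustrative sketch only, modeled on the simplest in-tree
 * handlers such as evbug): a connect() method creating a handle,
 * registering it and opening the device so that events start flowing.
 *
 *	static int foo_connect(struct input_handler *handler,
 *			       struct input_dev *dev,
 *			       const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "foo";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	err_unregister_handle:
 *		input_unregister_handle(handle);
 *	err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 */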
2523
2524/**
2525 * input_unregister_handle - unregister an input handle
2526 * @handle: handle to unregister
2527 *
2528 * This function removes input handle from device's
2529 * and handler's lists.
2530 *
2531 * This function is supposed to be called from handler's
2532 * disconnect() method.
2533 */
2534void input_unregister_handle(struct input_handle *handle)
2535{
2536 struct input_dev *dev = handle->dev;
2537
2538 list_del_rcu(&handle->h_node);
2539
2540 /*
2541 * Take dev->mutex to prevent race with input_release_device().
2542 */
2543 mutex_lock(&dev->mutex);
2544 list_del_rcu(&handle->d_node);
2545 mutex_unlock(&dev->mutex);
2546
2547 synchronize_rcu();
2548}
2549EXPORT_SYMBOL(input_unregister_handle);
2550
2551/**
2552 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
2554 * @legacy_num: size of legacy range
2555 * @allow_dynamic: whether we can also take ID from the dynamic range
2556 *
 * This function allocates a new device minor from the input major namespace.
 * The caller can request a legacy minor by specifying @legacy_base and
 * @legacy_num parameters, and whether an ID can be allocated from the
 * dynamic range if there are no free IDs in the legacy range.
2561 */
2562int input_get_new_minor(int legacy_base, unsigned int legacy_num,
2563 bool allow_dynamic)
2564{
2565 /*
2566 * This function should be called from input handler's ->connect()
2567 * methods, which are serialized with input_mutex, so no additional
2568 * locking is needed here.
2569 */
2570 if (legacy_base >= 0) {
2571 int minor = ida_simple_get(&input_ida,
2572 legacy_base,
2573 legacy_base + legacy_num,
2574 GFP_KERNEL);
2575 if (minor >= 0 || !allow_dynamic)
2576 return minor;
2577 }
2578
2579 return ida_simple_get(&input_ida,
2580 INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
2581 GFP_KERNEL);
2582}
2583EXPORT_SYMBOL(input_get_new_minor);
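
/*
 * Example (illustrative sketch only): a handler's connect() method
 * reserving a character device minor, preferring a hypothetical legacy
 * range (FOO_MINOR_BASE and FOO_MINORS are made-up constants) and falling
 * back to the dynamic range.  The minor is given back later with
 * input_free_minor().
 *
 *	int minor;
 *
 *	minor = input_get_new_minor(FOO_MINOR_BASE, FOO_MINORS, true);
 *	if (minor < 0)
 *		return minor;
 */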
2584
2585/**
2586 * input_free_minor - release previously allocated minor
2587 * @minor: minor to be released
2588 *
2589 * This function releases previously allocated input minor so that it can be
2590 * reused later.
2591 */
2592void input_free_minor(unsigned int minor)
2593{
2594 ida_simple_remove(&input_ida, minor);
2595}
2596EXPORT_SYMBOL(input_free_minor);
2597
2598static int __init input_init(void)
2599{
2600 int err;
2601
2602 err = class_register(&input_class);
2603 if (err) {
2604 pr_err("unable to register input_dev class\n");
2605 return err;
2606 }
2607
2608 err = input_proc_init();
2609 if (err)
2610 goto fail1;
2611
2612 err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2613 INPUT_MAX_CHAR_DEVICES, "input");
2614 if (err) {
2615 pr_err("unable to register char major %d", INPUT_MAJOR);
2616 goto fail2;
2617 }
2618
2619 return 0;
2620
2621 fail2: input_proc_exit();
2622 fail1: class_unregister(&input_class);
2623 return err;
2624}
2625
2626static void __exit input_exit(void)
2627{
2628 input_proc_exit();
2629 unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
2630 INPUT_MAX_CHAR_DEVICES);
2631 class_unregister(&input_class);
2632}
2633
2634subsys_initcall(input_init);
2635module_exit(input_exit);