1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * HID support for Linux
4 *
5 * Copyright (c) 1999 Andreas Gal
6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
8 * Copyright (c) 2006-2012 Jiri Kosina
9 */
10
11/*
12 */
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
16#include <linux/module.h>
17#include <linux/slab.h>
18#include <linux/init.h>
19#include <linux/kernel.h>
20#include <linux/list.h>
21#include <linux/mm.h>
22#include <linux/spinlock.h>
23#include <asm/unaligned.h>
24#include <asm/byteorder.h>
25#include <linux/input.h>
26#include <linux/wait.h>
27#include <linux/vmalloc.h>
28#include <linux/sched.h>
29#include <linux/semaphore.h>
30
31#include <linux/hid.h>
32#include <linux/hiddev.h>
33#include <linux/hid-debug.h>
34#include <linux/hidraw.h>
35
36#include "hid-ids.h"
37
38/*
39 * Version Information
40 */
41
42#define DRIVER_DESC "HID core driver"
43
44int hid_debug = 0;
45module_param_named(debug, hid_debug, int, 0600);
46MODULE_PARM_DESC(debug, "toggle HID debugging messages");
47EXPORT_SYMBOL_GPL(hid_debug);
48
49static int hid_ignore_special_drivers = 0;
50module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
51MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
52
53/*
54 * Register a new report for a device.
55 */
56
57struct hid_report *hid_register_report(struct hid_device *device,
58 enum hid_report_type type, unsigned int id,
59 unsigned int application)
60{
61 struct hid_report_enum *report_enum = device->report_enum + type;
62 struct hid_report *report;
63
64 if (id >= HID_MAX_IDS)
65 return NULL;
66 if (report_enum->report_id_hash[id])
67 return report_enum->report_id_hash[id];
68
69 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
70 if (!report)
71 return NULL;
72
73 if (id != 0)
74 report_enum->numbered = 1;
75
76 report->id = id;
77 report->type = type;
78 report->size = 0;
79 report->device = device;
80 report->application = application;
81 report_enum->report_id_hash[id] = report;
82
83 list_add_tail(&report->list, &report_enum->report_list);
84 INIT_LIST_HEAD(&report->field_entry_list);
85
86 return report;
87}
88EXPORT_SYMBOL_GPL(hid_register_report);
89
90/*
91 * Register a new field for this report.
92 */
93
94static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
95{
96 struct hid_field *field;
97
98 if (report->maxfield == HID_MAX_FIELDS) {
99 hid_err(report->device, "too many fields in report\n");
100 return NULL;
101 }
102
103 field = kzalloc((sizeof(struct hid_field) +
104 usages * sizeof(struct hid_usage) +
105 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
106 if (!field)
107 return NULL;
108
109 field->index = report->maxfield++;
110 report->field[field->index] = field;
111 field->usage = (struct hid_usage *)(field + 1);
112 field->value = (s32 *)(field->usage + usages);
113 field->new_value = (s32 *)(field->value + usages);
114 field->usages_priorities = (s32 *)(field->new_value + usages);
115 field->report = report;
116
117 return field;
118}
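
/*
 * Layout note (illustrative, mirrors the allocation above): all per-field
 * tables live in the single allocation directly behind struct hid_field and
 * are freed together with it by hid_free_report():
 *
 *	[struct hid_field][usage[0..usages-1]][value[]][new_value[]][usages_priorities[]]
 *
 * A minimal sketch of the same pointer arithmetic for a field with three
 * usages:
 *
 *	f->usage = (struct hid_usage *)(f + 1);
 *	f->value = (s32 *)(f->usage + 3);
 *	f->new_value = (s32 *)(f->value + 3);
 */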
119
120/*
121 * Open a collection. The type/usage is pushed on the stack.
122 */
123
124static int open_collection(struct hid_parser *parser, unsigned type)
125{
126 struct hid_collection *collection;
127 unsigned usage;
128 int collection_index;
129
130 usage = parser->local.usage[0];
131
132 if (parser->collection_stack_ptr == parser->collection_stack_size) {
133 unsigned int *collection_stack;
134 unsigned int new_size = parser->collection_stack_size +
135 HID_COLLECTION_STACK_SIZE;
136
137 collection_stack = krealloc(parser->collection_stack,
138 new_size * sizeof(unsigned int),
139 GFP_KERNEL);
140 if (!collection_stack)
141 return -ENOMEM;
142
143 parser->collection_stack = collection_stack;
144 parser->collection_stack_size = new_size;
145 }
146
147 if (parser->device->maxcollection == parser->device->collection_size) {
148 collection = kmalloc(
149 array3_size(sizeof(struct hid_collection),
150 parser->device->collection_size,
151 2),
152 GFP_KERNEL);
153 if (collection == NULL) {
154 hid_err(parser->device, "failed to reallocate collection array\n");
155 return -ENOMEM;
156 }
157 memcpy(collection, parser->device->collection,
158 sizeof(struct hid_collection) *
159 parser->device->collection_size);
160 memset(collection + parser->device->collection_size, 0,
161 sizeof(struct hid_collection) *
162 parser->device->collection_size);
163 kfree(parser->device->collection);
164 parser->device->collection = collection;
165 parser->device->collection_size *= 2;
166 }
167
168 parser->collection_stack[parser->collection_stack_ptr++] =
169 parser->device->maxcollection;
170
171 collection_index = parser->device->maxcollection++;
172 collection = parser->device->collection + collection_index;
173 collection->type = type;
174 collection->usage = usage;
175 collection->level = parser->collection_stack_ptr - 1;
176 collection->parent_idx = (collection->level == 0) ? -1 :
177 parser->collection_stack[collection->level - 1];
178
179 if (type == HID_COLLECTION_APPLICATION)
180 parser->device->maxapplication++;
181
182 return 0;
183}
184
185/*
186 * Close a collection.
187 */
188
189static int close_collection(struct hid_parser *parser)
190{
191 if (!parser->collection_stack_ptr) {
192 hid_err(parser->device, "collection stack underflow\n");
193 return -EINVAL;
194 }
195 parser->collection_stack_ptr--;
196 return 0;
197}
198
199/*
200 * Climb up the stack, search for the specified collection type
201 * and return the usage.
202 */
203
204static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
205{
206 struct hid_collection *collection = parser->device->collection;
207 int n;
208
209 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
210 unsigned index = parser->collection_stack[n];
211 if (collection[index].type == type)
212 return collection[index].usage;
213 }
214 return 0; /* we know nothing about this usage type */
215}
216
217/*
218 * Concatenate a usage which defines 16 bits or less with the
219 * currently defined usage page to form a 32-bit usage
220 */
221
222static void complete_usage(struct hid_parser *parser, unsigned int index)
223{
224 parser->local.usage[index] &= 0xFFFF;
225 parser->local.usage[index] |=
226 (parser->global.usage_page & 0xFFFF) << 16;
227}
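
/*
 * Worked example (illustrative): with Usage Page 0x0001 (Generic Desktop)
 * in effect and a 1-byte Usage item 0x30 (X) stored in local.usage[index],
 * complete_usage() forms the full 32-bit usage:
 *
 *	(0x0001 << 16) | 0x0030 == 0x00010030 == HID_GD_X
 */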
228
229/*
230 * Add a usage to the temporary parser table.
231 */
232
233static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
234{
235 if (parser->local.usage_index >= HID_MAX_USAGES) {
236 hid_err(parser->device, "usage index exceeded\n");
237 return -1;
238 }
239 parser->local.usage[parser->local.usage_index] = usage;
240
241 /*
242 * If Usage item only includes usage id, concatenate it with
243 * currently defined usage page
244 */
245 if (size <= 2)
246 complete_usage(parser, parser->local.usage_index);
247
248 parser->local.usage_size[parser->local.usage_index] = size;
249 parser->local.collection_index[parser->local.usage_index] =
250 parser->collection_stack_ptr ?
251 parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
252 parser->local.usage_index++;
253 return 0;
254}
255
256/*
257 * Register a new field for this report.
258 */
259
260static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
261{
262 struct hid_report *report;
263 struct hid_field *field;
264 unsigned int usages;
265 unsigned int offset;
266 unsigned int i;
267 unsigned int application;
268
269 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
270
271 report = hid_register_report(parser->device, report_type,
272 parser->global.report_id, application);
273 if (!report) {
274 hid_err(parser->device, "hid_register_report failed\n");
275 return -1;
276 }
277
278 /* Handle both signed and unsigned cases properly */
279 if ((parser->global.logical_minimum < 0 &&
280 parser->global.logical_maximum <
281 parser->global.logical_minimum) ||
282 (parser->global.logical_minimum >= 0 &&
283 (__u32)parser->global.logical_maximum <
284 (__u32)parser->global.logical_minimum)) {
285 dbg_hid("logical range invalid 0x%x 0x%x\n",
286 parser->global.logical_minimum,
287 parser->global.logical_maximum);
288 return -1;
289 }
290
291 offset = report->size;
292 report->size += parser->global.report_size * parser->global.report_count;
293
294 /* Total size check: Allow for possible report index byte */
295 if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) {
296 hid_err(parser->device, "report is too long\n");
297 return -1;
298 }
299
300 if (!parser->local.usage_index) /* Ignore padding fields */
301 return 0;
302
303 usages = max_t(unsigned, parser->local.usage_index,
304 parser->global.report_count);
305
306 field = hid_register_field(report, usages);
307 if (!field)
308 return 0;
309
310 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
311 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
312 field->application = application;
313
314 for (i = 0; i < usages; i++) {
315 unsigned j = i;
316 /* Duplicate the last usage we parsed if we have excess values */
317 if (i >= parser->local.usage_index)
318 j = parser->local.usage_index - 1;
319 field->usage[i].hid = parser->local.usage[j];
320 field->usage[i].collection_index =
321 parser->local.collection_index[j];
322 field->usage[i].usage_index = i;
323 field->usage[i].resolution_multiplier = 1;
324 }
325
326 field->maxusage = usages;
327 field->flags = flags;
328 field->report_offset = offset;
329 field->report_type = report_type;
330 field->report_size = parser->global.report_size;
331 field->report_count = parser->global.report_count;
332 field->logical_minimum = parser->global.logical_minimum;
333 field->logical_maximum = parser->global.logical_maximum;
334 field->physical_minimum = parser->global.physical_minimum;
335 field->physical_maximum = parser->global.physical_maximum;
336 field->unit_exponent = parser->global.unit_exponent;
337 field->unit = parser->global.unit;
338
339 return 0;
340}
341
342/*
343 * Read data value from item.
344 */
345
346static u32 item_udata(struct hid_item *item)
347{
348 switch (item->size) {
349 case 1: return item->data.u8;
350 case 2: return item->data.u16;
351 case 4: return item->data.u32;
352 }
353 return 0;
354}
355
356static s32 item_sdata(struct hid_item *item)
357{
358 switch (item->size) {
359 case 1: return item->data.s8;
360 case 2: return item->data.s16;
361 case 4: return item->data.s32;
362 }
363 return 0;
364}
365
366/*
367 * Process a global item.
368 */
369
370static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
371{
372 __s32 raw_value;
373 switch (item->tag) {
374 case HID_GLOBAL_ITEM_TAG_PUSH:
375
376 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
377 hid_err(parser->device, "global environment stack overflow\n");
378 return -1;
379 }
380
381 memcpy(parser->global_stack + parser->global_stack_ptr++,
382 &parser->global, sizeof(struct hid_global));
383 return 0;
384
385 case HID_GLOBAL_ITEM_TAG_POP:
386
387 if (!parser->global_stack_ptr) {
388 hid_err(parser->device, "global environment stack underflow\n");
389 return -1;
390 }
391
392 memcpy(&parser->global, parser->global_stack +
393 --parser->global_stack_ptr, sizeof(struct hid_global));
394 return 0;
395
396 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
397 parser->global.usage_page = item_udata(item);
398 return 0;
399
400 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
401 parser->global.logical_minimum = item_sdata(item);
402 return 0;
403
404 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
405 if (parser->global.logical_minimum < 0)
406 parser->global.logical_maximum = item_sdata(item);
407 else
408 parser->global.logical_maximum = item_udata(item);
409 return 0;
410
411 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
412 parser->global.physical_minimum = item_sdata(item);
413 return 0;
414
415 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
416 if (parser->global.physical_minimum < 0)
417 parser->global.physical_maximum = item_sdata(item);
418 else
419 parser->global.physical_maximum = item_udata(item);
420 return 0;
421
422 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
423 /* Many devices provide unit exponent as a two's complement
424 * nibble due to the common misunderstanding of HID
425 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
426 * both this and the standard encoding. */
427 raw_value = item_sdata(item);
428 if (!(raw_value & 0xfffffff0))
429 parser->global.unit_exponent = hid_snto32(raw_value, 4);
430 else
431 parser->global.unit_exponent = raw_value;
432 return 0;
433
434 case HID_GLOBAL_ITEM_TAG_UNIT:
435 parser->global.unit = item_udata(item);
436 return 0;
437
438 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
439 parser->global.report_size = item_udata(item);
440 if (parser->global.report_size > 256) {
441 hid_err(parser->device, "invalid report_size %d\n",
442 parser->global.report_size);
443 return -1;
444 }
445 return 0;
446
447 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
448 parser->global.report_count = item_udata(item);
449 if (parser->global.report_count > HID_MAX_USAGES) {
450 hid_err(parser->device, "invalid report_count %d\n",
451 parser->global.report_count);
452 return -1;
453 }
454 return 0;
455
456 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
457 parser->global.report_id = item_udata(item);
458 if (parser->global.report_id == 0 ||
459 parser->global.report_id >= HID_MAX_IDS) {
460 hid_err(parser->device, "report_id %u is invalid\n",
461 parser->global.report_id);
462 return -1;
463 }
464 return 0;
465
466 default:
467 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
468 return -1;
469 }
470}
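
/*
 * Illustrative sketch (hand-written bytes, not from a real device): a typical
 * run of global items and the parser->global state each one leaves behind.
 *
 *	0x05, 0x01,	Usage Page (Generic Desktop)	-> usage_page = 0x0001
 *	0x15, 0x00,	Logical Minimum (0)		-> logical_minimum = 0
 *	0x25, 0x7f,	Logical Maximum (127)		-> logical_maximum = 127
 *	0x75, 0x08,	Report Size (8)			-> report_size = 8
 *	0x95, 0x02,	Report Count (2)		-> report_count = 2
 */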
471
472/*
473 * Process a local item.
474 */
475
476static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
477{
478 __u32 data;
479 unsigned n;
480 __u32 count;
481
482 data = item_udata(item);
483
484 switch (item->tag) {
485 case HID_LOCAL_ITEM_TAG_DELIMITER:
486
487 if (data) {
488 /*
489 * We treat items before the first delimiter
490 * as global to all usage sets (branch 0).
491 * In the moment we process only these global
492 * items and the first delimiter set.
493 */
494 if (parser->local.delimiter_depth != 0) {
495 hid_err(parser->device, "nested delimiters\n");
496 return -1;
497 }
498 parser->local.delimiter_depth++;
499 parser->local.delimiter_branch++;
500 } else {
501 if (parser->local.delimiter_depth < 1) {
502 hid_err(parser->device, "bogus close delimiter\n");
503 return -1;
504 }
505 parser->local.delimiter_depth--;
506 }
507 return 0;
508
509 case HID_LOCAL_ITEM_TAG_USAGE:
510
511 if (parser->local.delimiter_branch > 1) {
512 dbg_hid("alternative usage ignored\n");
513 return 0;
514 }
515
516 return hid_add_usage(parser, data, item->size);
517
518 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
519
520 if (parser->local.delimiter_branch > 1) {
521 dbg_hid("alternative usage ignored\n");
522 return 0;
523 }
524
525 parser->local.usage_minimum = data;
526 return 0;
527
528 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
529
530 if (parser->local.delimiter_branch > 1) {
531 dbg_hid("alternative usage ignored\n");
532 return 0;
533 }
534
535 count = data - parser->local.usage_minimum;
536 if (count + parser->local.usage_index >= HID_MAX_USAGES) {
537 /*
538 * We do not warn if the name is not set, we are
539 * actually pre-scanning the device.
540 */
541 if (dev_name(&parser->device->dev))
542 hid_warn(parser->device,
543 "ignoring exceeding usage max\n");
544 data = HID_MAX_USAGES - parser->local.usage_index +
545 parser->local.usage_minimum - 1;
546 if (data <= 0) {
547 hid_err(parser->device,
548 "no more usage index available\n");
549 return -1;
550 }
551 }
552
553 for (n = parser->local.usage_minimum; n <= data; n++)
554 if (hid_add_usage(parser, n, item->size)) {
555 dbg_hid("hid_add_usage failed\n");
556 return -1;
557 }
558 return 0;
559
560 default:
561
562 dbg_hid("unknown local item tag 0x%x\n", item->tag);
563 return 0;
564 }
565 return 0;
566}
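
/*
 * Illustrative sketch (hand-written bytes): a Usage Minimum/Maximum pair is
 * expanded into one usage per value, exactly as if each usage had been
 * listed explicitly. Assuming Usage Page (Button) is in effect:
 *
 *	0x19, 0x01,	Usage Minimum (Button 1)  -> local.usage_minimum = 1
 *	0x29, 0x03,	Usage Maximum (Button 3)  -> hid_add_usage() for 1, 2, 3
 */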
567
568/*
569 * Concatenate Usage Pages into Usages where relevant:
570 * As per specification, 6.2.2.8: "When the parser encounters a main item it
571 * concatenates the last declared Usage Page with a Usage to form a complete
572 * usage value."
573 */
574
575static void hid_concatenate_last_usage_page(struct hid_parser *parser)
576{
577 int i;
578 unsigned int usage_page;
579 unsigned int current_page;
580
581 if (!parser->local.usage_index)
582 return;
583
584 usage_page = parser->global.usage_page;
585
586 /*
587 * Concatenate usage page again only if last declared Usage Page
588 * has not been already used in previous usages concatenation
589 */
590 for (i = parser->local.usage_index - 1; i >= 0; i--) {
591 if (parser->local.usage_size[i] > 2)
592 /* Ignore extended usages */
593 continue;
594
595 current_page = parser->local.usage[i] >> 16;
596 if (current_page == usage_page)
597 break;
598
599 complete_usage(parser, i);
600 }
601}
602
603/*
604 * Process a main item.
605 */
606
607static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
608{
609 __u32 data;
610 int ret;
611
612 hid_concatenate_last_usage_page(parser);
613
614 data = item_udata(item);
615
616 switch (item->tag) {
617 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
618 ret = open_collection(parser, data & 0xff);
619 break;
620 case HID_MAIN_ITEM_TAG_END_COLLECTION:
621 ret = close_collection(parser);
622 break;
623 case HID_MAIN_ITEM_TAG_INPUT:
624 ret = hid_add_field(parser, HID_INPUT_REPORT, data);
625 break;
626 case HID_MAIN_ITEM_TAG_OUTPUT:
627 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
628 break;
629 case HID_MAIN_ITEM_TAG_FEATURE:
630 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
631 break;
632 default:
633 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
634 ret = 0;
635 }
636
637 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
638
639 return ret;
640}
641
642/*
643 * Process a reserved item.
644 */
645
646static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
647{
648 dbg_hid("reserved item type, tag 0x%x\n", item->tag);
649 return 0;
650}
651
652/*
653 * Free a report and all registered fields. The field->usage and
654 * field->value tables are allocated behind the field, so we need
655 * only to free(field) itself.
656 */
657
658static void hid_free_report(struct hid_report *report)
659{
660 unsigned n;
661
662 kfree(report->field_entries);
663
664 for (n = 0; n < report->maxfield; n++)
665 kfree(report->field[n]);
666 kfree(report);
667}
668
669/*
670 * Close report. This function returns the device
671 * state to the point prior to hid_open_report().
672 */
673static void hid_close_report(struct hid_device *device)
674{
675 unsigned i, j;
676
677 for (i = 0; i < HID_REPORT_TYPES; i++) {
678 struct hid_report_enum *report_enum = device->report_enum + i;
679
680 for (j = 0; j < HID_MAX_IDS; j++) {
681 struct hid_report *report = report_enum->report_id_hash[j];
682 if (report)
683 hid_free_report(report);
684 }
685 memset(report_enum, 0, sizeof(*report_enum));
686 INIT_LIST_HEAD(&report_enum->report_list);
687 }
688
689 kfree(device->rdesc);
690 device->rdesc = NULL;
691 device->rsize = 0;
692
693 kfree(device->collection);
694 device->collection = NULL;
695 device->collection_size = 0;
696 device->maxcollection = 0;
697 device->maxapplication = 0;
698
699 device->status &= ~HID_STAT_PARSED;
700}
701
702/*
703 * Free a device structure, all reports, and all fields.
704 */
705
706static void hid_device_release(struct device *dev)
707{
708 struct hid_device *hid = to_hid_device(dev);
709
710 hid_close_report(hid);
711 kfree(hid->dev_rdesc);
712 kfree(hid);
713}
714
715/*
716 * Fetch a report description item from the data stream. We support long
717 * items, though they are not used yet.
718 */
719
720static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
721{
722 u8 b;
723
724 if ((end - start) <= 0)
725 return NULL;
726
727 b = *start++;
728
729 item->type = (b >> 2) & 3;
730 item->tag = (b >> 4) & 15;
731
732 if (item->tag == HID_ITEM_TAG_LONG) {
733
734 item->format = HID_ITEM_FORMAT_LONG;
735
736 if ((end - start) < 2)
737 return NULL;
738
739 item->size = *start++;
740 item->tag = *start++;
741
742 if ((end - start) < item->size)
743 return NULL;
744
745 item->data.longdata = start;
746 start += item->size;
747 return start;
748 }
749
750 item->format = HID_ITEM_FORMAT_SHORT;
751 item->size = b & 3;
752
753 switch (item->size) {
754 case 0:
755 return start;
756
757 case 1:
758 if ((end - start) < 1)
759 return NULL;
760 item->data.u8 = *start++;
761 return start;
762
763 case 2:
764 if ((end - start) < 2)
765 return NULL;
766 item->data.u16 = get_unaligned_le16(start);
767 start = (__u8 *)((__le16 *)start + 1);
768 return start;
769
770 case 3:
771 item->size++;
772 if ((end - start) < 4)
773 return NULL;
774 item->data.u32 = get_unaligned_le32(start);
775 start = (__u8 *)((__le32 *)start + 1);
776 return start;
777 }
778
779 return NULL;
780}
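
/*
 * Worked example (illustrative): decoding the short item 0x05 0x01,
 * i.e. "Usage Page (Generic Desktop)", with fetch_item():
 *
 *	b = 0x05
 *	item->type = (b >> 2) & 3  = 1	(global item)
 *	item->tag  = (b >> 4) & 15 = 0	(Usage Page)
 *	item->size = b & 3         = 1	-> item->data.u8 = 0x01
 */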
781
782static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
783{
784 struct hid_device *hid = parser->device;
785
786 if (usage == HID_DG_CONTACTID)
787 hid->group = HID_GROUP_MULTITOUCH;
788}
789
790static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
791{
792 if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
793 parser->global.report_size == 8)
794 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
795
796 if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
797 parser->global.report_size == 8)
798 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
799}
800
801static void hid_scan_collection(struct hid_parser *parser, unsigned type)
802{
803 struct hid_device *hid = parser->device;
804 int i;
805
806 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
807 type == HID_COLLECTION_PHYSICAL)
808 hid->group = HID_GROUP_SENSOR_HUB;
809
810 if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
811 hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
812 hid->group == HID_GROUP_MULTITOUCH)
813 hid->group = HID_GROUP_GENERIC;
814
815 if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
816 for (i = 0; i < parser->local.usage_index; i++)
817 if (parser->local.usage[i] == HID_GD_POINTER)
818 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
819
820 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
821 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
822
823 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
824 for (i = 0; i < parser->local.usage_index; i++)
825 if (parser->local.usage[i] ==
826 (HID_UP_GOOGLEVENDOR | 0x0001))
827 parser->device->group =
828 HID_GROUP_VIVALDI;
829}
830
831static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
832{
833 __u32 data;
834 int i;
835
836 hid_concatenate_last_usage_page(parser);
837
838 data = item_udata(item);
839
840 switch (item->tag) {
841 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
842 hid_scan_collection(parser, data & 0xff);
843 break;
844 case HID_MAIN_ITEM_TAG_END_COLLECTION:
845 break;
846 case HID_MAIN_ITEM_TAG_INPUT:
847 /* ignore constant inputs, they will be ignored by hid-input */
848 if (data & HID_MAIN_ITEM_CONSTANT)
849 break;
850 for (i = 0; i < parser->local.usage_index; i++)
851 hid_scan_input_usage(parser, parser->local.usage[i]);
852 break;
853 case HID_MAIN_ITEM_TAG_OUTPUT:
854 break;
855 case HID_MAIN_ITEM_TAG_FEATURE:
856 for (i = 0; i < parser->local.usage_index; i++)
857 hid_scan_feature_usage(parser, parser->local.usage[i]);
858 break;
859 }
860
861 /* Reset the local parser environment */
862 memset(&parser->local, 0, sizeof(parser->local));
863
864 return 0;
865}
866
867/*
868 * Scan a report descriptor before the device is added to the bus.
869 * Sets device groups and other properties that determine what driver
870 * to load.
871 */
872static int hid_scan_report(struct hid_device *hid)
873{
874 struct hid_parser *parser;
875 struct hid_item item;
876 __u8 *start = hid->dev_rdesc;
877 __u8 *end = start + hid->dev_rsize;
878 static int (*dispatch_type[])(struct hid_parser *parser,
879 struct hid_item *item) = {
880 hid_scan_main,
881 hid_parser_global,
882 hid_parser_local,
883 hid_parser_reserved
884 };
885
886 parser = vzalloc(sizeof(struct hid_parser));
887 if (!parser)
888 return -ENOMEM;
889
890 parser->device = hid;
891 hid->group = HID_GROUP_GENERIC;
892
893 /*
894 * The parsing is simpler than the one in hid_open_report() as we should
895 * be robust against hid errors. Those errors will be raised by
896 * hid_open_report() anyway.
897 */
898 while ((start = fetch_item(start, end, &item)) != NULL)
899 dispatch_type[item.type](parser, &item);
900
901 /*
902 * Handle special flags set during scanning.
903 */
904 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
905 (hid->group == HID_GROUP_MULTITOUCH))
906 hid->group = HID_GROUP_MULTITOUCH_WIN_8;
907
908 /*
909 * Vendor-specific handling
910 */
911 switch (hid->vendor) {
912 case USB_VENDOR_ID_WACOM:
913 hid->group = HID_GROUP_WACOM;
914 break;
915 case USB_VENDOR_ID_SYNAPTICS:
916 if (hid->group == HID_GROUP_GENERIC)
917 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
918 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
919 /*
920 * hid-rmi should take care of them,
921 * not hid-generic
922 */
923 hid->group = HID_GROUP_RMI;
924 break;
925 }
926
927 kfree(parser->collection_stack);
928 vfree(parser);
929 return 0;
930}
931
932/**
933 * hid_parse_report - parse device report
934 *
935 * @hid: hid device
936 * @start: report descriptor start
937 * @size: report descriptor size
938 *
939 * Allocate a copy of the report descriptor as read by the bus driver. This function should
940 * only be called from parse() in ll drivers.
941 */
942int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
943{
944 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
945 if (!hid->dev_rdesc)
946 return -ENOMEM;
947 hid->dev_rsize = size;
948 return 0;
949}
950EXPORT_SYMBOL_GPL(hid_parse_report);
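
/*
 * Minimal sketch (hypothetical transport code, not part of hid-core): a
 * low-level driver's ->parse() callback hands the raw report descriptor it
 * fetched from the bus to hid_parse_report(); parsing itself happens later,
 * when the bound hid driver calls hid_parse()/hid_open_report(). The
 * "my_" names below are made up for illustration.
 *
 *	static int my_ll_parse(struct hid_device *hid)
 *	{
 *		struct my_transport *tp = my_transport_from_hid(hid);
 *
 *		return hid_parse_report(hid, tp->rdesc, tp->rdesc_len);
 *	}
 */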
951
952static const char * const hid_report_names[] = {
953 "HID_INPUT_REPORT",
954 "HID_OUTPUT_REPORT",
955 "HID_FEATURE_REPORT",
956};
957/**
958 * hid_validate_values - validate existing device report's value indexes
959 *
960 * @hid: hid device
961 * @type: which report type to examine
962 * @id: which report ID to examine (0 for first)
963 * @field_index: which report field to examine
964 * @report_counts: expected number of values
965 *
966 * Validate the number of values in a given field of a given report, after
967 * parsing.
968 */
969struct hid_report *hid_validate_values(struct hid_device *hid,
970 enum hid_report_type type, unsigned int id,
971 unsigned int field_index,
972 unsigned int report_counts)
973{
974 struct hid_report *report;
975
976 if (type > HID_FEATURE_REPORT) {
977 hid_err(hid, "invalid HID report type %u\n", type);
978 return NULL;
979 }
980
981 if (id >= HID_MAX_IDS) {
982 hid_err(hid, "invalid HID report id %u\n", id);
983 return NULL;
984 }
985
986 /*
987 * Explicitly not using hid_get_report() here since it depends on
988 * ->numbered being checked, which may not always be the case when
989 * drivers go to access report values.
990 */
991 if (id == 0) {
992 /*
993 * Validating on id 0 means we should examine the first
994 * report in the list.
995 */
996 report = list_first_entry_or_null(
997 &hid->report_enum[type].report_list,
998 struct hid_report, list);
999 } else {
1000 report = hid->report_enum[type].report_id_hash[id];
1001 }
1002 if (!report) {
1003 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1004 return NULL;
1005 }
1006 if (report->maxfield <= field_index) {
1007 hid_err(hid, "not enough fields in %s %u\n",
1008 hid_report_names[type], id);
1009 return NULL;
1010 }
1011 if (report->field[field_index]->report_count < report_counts) {
1012 hid_err(hid, "not enough values in %s %u field %u\n",
1013 hid_report_names[type], id, field_index);
1014 return NULL;
1015 }
1016 return report;
1017}
1018EXPORT_SYMBOL_GPL(hid_validate_values);
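
/*
 * Minimal usage sketch (hypothetical driver code): a driver that intends to
 * write four values into field 0 of output report 1 can validate the layout
 * once at probe time instead of trusting the descriptor. The report id,
 * field index and count are example numbers only.
 *
 *	report = hid_validate_values(hdev, HID_OUTPUT_REPORT, 1, 0, 4);
 *	if (!report)
 *		return -ENODEV;
 */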
1019
1020static int hid_calculate_multiplier(struct hid_device *hid,
1021 struct hid_field *multiplier)
1022{
1023 int m;
1024 __s32 v = *multiplier->value;
1025 __s32 lmin = multiplier->logical_minimum;
1026 __s32 lmax = multiplier->logical_maximum;
1027 __s32 pmin = multiplier->physical_minimum;
1028 __s32 pmax = multiplier->physical_maximum;
1029
1030 /*
1031 * "Because OS implementations will generally divide the control's
1032 * reported count by the Effective Resolution Multiplier, designers
1033 * should take care not to establish a potential Effective
1034 * Resolution Multiplier of zero."
1035 * HID Usage Table, v1.12, Section 4.3.1, p31
1036 */
1037 if (lmax - lmin == 0)
1038 return 1;
1039 /*
1040 * Handling the unit exponent is left as an exercise to whoever
1041 * finds a device where that exponent is not 0.
1042 */
1043 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1044 if (unlikely(multiplier->unit_exponent != 0)) {
1045 hid_warn(hid,
1046 "unsupported Resolution Multiplier unit exponent %d\n",
1047 multiplier->unit_exponent);
1048 }
1049
1050 /* There are no devices with an effective multiplier > 255 */
1051 if (unlikely(m == 0 || m > 255 || m < -255)) {
1052 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1053 m = 1;
1054 }
1055
1056 return m;
1057}
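
/*
 * Worked example (illustrative numbers): a device exposing a Resolution
 * Multiplier with logical range 0..1 and physical range 1..8, currently set
 * to its maximum (*multiplier->value == 1), yields
 *
 *	m = ((1 - 0) / (1 - 0)) * (8 - 1) + 1 = 8
 *
 * i.e. the device sends events pre-multiplied by 8.
 */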
1058
1059static void hid_apply_multiplier_to_field(struct hid_device *hid,
1060 struct hid_field *field,
1061 struct hid_collection *multiplier_collection,
1062 int effective_multiplier)
1063{
1064 struct hid_collection *collection;
1065 struct hid_usage *usage;
1066 int i;
1067
1068 /*
1069 * If multiplier_collection is NULL, the multiplier applies
1070 * to all fields in the report.
1071 * Otherwise, it is the Logical Collection the multiplier applies to
1072 * but our field may be in a subcollection of that collection.
1073 */
1074 for (i = 0; i < field->maxusage; i++) {
1075 usage = &field->usage[i];
1076
1077 collection = &hid->collection[usage->collection_index];
1078 while (collection->parent_idx != -1 &&
1079 collection != multiplier_collection)
1080 collection = &hid->collection[collection->parent_idx];
1081
1082 if (collection->parent_idx != -1 ||
1083 multiplier_collection == NULL)
1084 usage->resolution_multiplier = effective_multiplier;
1085
1086 }
1087}
1088
1089static void hid_apply_multiplier(struct hid_device *hid,
1090 struct hid_field *multiplier)
1091{
1092 struct hid_report_enum *rep_enum;
1093 struct hid_report *rep;
1094 struct hid_field *field;
1095 struct hid_collection *multiplier_collection;
1096 int effective_multiplier;
1097 int i;
1098
1099 /*
1100 * "The Resolution Multiplier control must be contained in the same
1101 * Logical Collection as the control(s) to which it is to be applied.
1102 * If no Resolution Multiplier is defined, then the Resolution
1103 * Multiplier defaults to 1. If more than one control exists in a
1104 * Logical Collection, the Resolution Multiplier is associated with
1105 * all controls in the collection. If no Logical Collection is
1106 * defined, the Resolution Multiplier is associated with all
1107 * controls in the report."
1108 * HID Usage Table, v1.12, Section 4.3.1, p30
1109 *
1110 * Thus, search from the current collection upwards until we find a
1111 * logical collection. Then search all fields for that same parent
1112 * collection. Those are the fields the multiplier applies to.
1113 *
1114 * If we have more than one multiplier, it will overwrite the
1115 * applicable fields later.
1116 */
1117 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1118 while (multiplier_collection->parent_idx != -1 &&
1119 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1120 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1121
1122 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1123
1124 rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1125 list_for_each_entry(rep, &rep_enum->report_list, list) {
1126 for (i = 0; i < rep->maxfield; i++) {
1127 field = rep->field[i];
1128 hid_apply_multiplier_to_field(hid, field,
1129 multiplier_collection,
1130 effective_multiplier);
1131 }
1132 }
1133}
1134
1135/*
1136 * hid_setup_resolution_multiplier - set up all resolution multipliers
1137 *
1138 * @hid: hid device
1139 *
1140 * Search for all Resolution Multiplier Feature Reports and apply their
1141 * value to all matching Input items. This only updates the internal struct
1142 * fields.
1143 *
1144 * The Resolution Multiplier is applied by the hardware. If the multiplier
1145 * is anything other than 1, the hardware will send pre-multiplied events
1146 * so that the same physical interaction generates an accumulated
1147 * accumulated_value = value * multiplier
1148 * This may be achieved by sending
1149 * - "value * multiplier" for each event, or
1150 * - "value" but "multiplier" times as frequently, or
1151 * - a combination of the above
1152 * The only guarantee is that the same physical interaction always generates
1153 * an accumulated 'value * multiplier'.
1154 *
1155 * This function must be called before any event processing and after
1156 * any SetRequest to the Resolution Multiplier.
1157 */
1158void hid_setup_resolution_multiplier(struct hid_device *hid)
1159{
1160 struct hid_report_enum *rep_enum;
1161 struct hid_report *rep;
1162 struct hid_usage *usage;
1163 int i, j;
1164
1165 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1166 list_for_each_entry(rep, &rep_enum->report_list, list) {
1167 for (i = 0; i < rep->maxfield; i++) {
1168 /* Ignore if report count is out of bounds. */
1169 if (rep->field[i]->report_count < 1)
1170 continue;
1171
1172 for (j = 0; j < rep->field[i]->maxusage; j++) {
1173 usage = &rep->field[i]->usage[j];
1174 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1175 hid_apply_multiplier(hid,
1176 rep->field[i]);
1177 }
1178 }
1179 }
1180}
1181EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1182
1183/**
1184 * hid_open_report - open a driver-specific device report
1185 *
1186 * @device: hid device
1187 *
1188 * Parse a report description into a hid_device structure. Reports are
1189 * enumerated, fields are attached to these reports.
1190 * 0 returned on success, otherwise nonzero error value.
1191 *
1192 * This function (or the equivalent hid_parse() macro) should only be
1193 * called from probe() in drivers, before starting the device.
1194 */
1195int hid_open_report(struct hid_device *device)
1196{
1197 struct hid_parser *parser;
1198 struct hid_item item;
1199 unsigned int size;
1200 __u8 *start;
1201 __u8 *buf;
1202 __u8 *end;
1203 __u8 *next;
1204 int ret;
1205 int i;
1206 static int (*dispatch_type[])(struct hid_parser *parser,
1207 struct hid_item *item) = {
1208 hid_parser_main,
1209 hid_parser_global,
1210 hid_parser_local,
1211 hid_parser_reserved
1212 };
1213
1214 if (WARN_ON(device->status & HID_STAT_PARSED))
1215 return -EBUSY;
1216
1217 start = device->dev_rdesc;
1218 if (WARN_ON(!start))
1219 return -ENODEV;
1220 size = device->dev_rsize;
1221
1222 buf = kmemdup(start, size, GFP_KERNEL);
1223 if (buf == NULL)
1224 return -ENOMEM;
1225
1226 if (device->driver->report_fixup)
1227 start = device->driver->report_fixup(device, buf, &size);
1228 else
1229 start = buf;
1230
1231 start = kmemdup(start, size, GFP_KERNEL);
1232 kfree(buf);
1233 if (start == NULL)
1234 return -ENOMEM;
1235
1236 device->rdesc = start;
1237 device->rsize = size;
1238
1239 parser = vzalloc(sizeof(struct hid_parser));
1240 if (!parser) {
1241 ret = -ENOMEM;
1242 goto alloc_err;
1243 }
1244
1245 parser->device = device;
1246
1247 end = start + size;
1248
1249 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1250 sizeof(struct hid_collection), GFP_KERNEL);
1251 if (!device->collection) {
1252 ret = -ENOMEM;
1253 goto err;
1254 }
1255 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1256 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1257 device->collection[i].parent_idx = -1;
1258
1259 ret = -EINVAL;
1260 while ((next = fetch_item(start, end, &item)) != NULL) {
1261 start = next;
1262
1263 if (item.format != HID_ITEM_FORMAT_SHORT) {
1264 hid_err(device, "unexpected long global item\n");
1265 goto err;
1266 }
1267
1268 if (dispatch_type[item.type](parser, &item)) {
1269 hid_err(device, "item %u %u %u %u parsing failed\n",
1270 item.format, (unsigned)item.size,
1271 (unsigned)item.type, (unsigned)item.tag);
1272 goto err;
1273 }
1274
1275 if (start == end) {
1276 if (parser->collection_stack_ptr) {
1277 hid_err(device, "unbalanced collection at end of report description\n");
1278 goto err;
1279 }
1280 if (parser->local.delimiter_depth) {
1281 hid_err(device, "unbalanced delimiter at end of report description\n");
1282 goto err;
1283 }
1284
1285 /*
1286 * fetch initial values in case the device's
1287 * default multiplier isn't the recommended 1
1288 */
1289 hid_setup_resolution_multiplier(device);
1290
1291 kfree(parser->collection_stack);
1292 vfree(parser);
1293 device->status |= HID_STAT_PARSED;
1294
1295 return 0;
1296 }
1297 }
1298
1299 hid_err(device, "item fetching failed at offset %u/%u\n",
1300 size - (unsigned int)(end - start), size);
1301err:
1302 kfree(parser->collection_stack);
1303alloc_err:
1304 vfree(parser);
1305 hid_close_report(device);
1306 return ret;
1307}
1308EXPORT_SYMBOL_GPL(hid_open_report);
1309
1310/*
1311 * Convert a signed n-bit integer to signed 32-bit integer. Common
1312 * cases are done through the compiler, the screwed things has to be
1313 * done by hand.
1314 */
1315
1316static s32 snto32(__u32 value, unsigned n)
1317{
1318 if (!value || !n)
1319 return 0;
1320
1321 if (n > 32)
1322 n = 32;
1323
1324 switch (n) {
1325 case 8: return ((__s8)value);
1326 case 16: return ((__s16)value);
1327 case 32: return ((__s32)value);
1328 }
1329 return value & (1 << (n - 1)) ? value | (~0U << n) : value;
1330}
1331
1332s32 hid_snto32(__u32 value, unsigned n)
1333{
1334 return snto32(value, n);
1335}
1336EXPORT_SYMBOL_GPL(hid_snto32);
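
/*
 * Worked examples (illustrative) of the sign extension above:
 *
 *	hid_snto32(0x7f, 8) ==  127
 *	hid_snto32(0xff, 8) ==   -1	(8-bit two's complement)
 *	hid_snto32(0x0e, 4) ==   -2	(only the low 4 bits are significant)
 */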
1337
1338/*
1339 * Convert a signed 32-bit integer to a signed n-bit integer.
1340 */
1341
1342static u32 s32ton(__s32 value, unsigned n)
1343{
1344 s32 a = value >> (n - 1);
1345 if (a && a != -1)
1346 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
1347 return value & ((1 << n) - 1);
1348}
1349
1350/*
1351 * Extract/implement a data field from/to a little endian report (bit array).
1352 *
1353 * Code sort-of follows HID spec:
1354 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1355 *
1356 * While the USB HID spec allows unlimited length bit fields in "report
1357 * descriptors", most devices never use more than 16 bits.
1358 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1359 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1360 */
1361
1362static u32 __extract(u8 *report, unsigned offset, int n)
1363{
1364 unsigned int idx = offset / 8;
1365 unsigned int bit_nr = 0;
1366 unsigned int bit_shift = offset % 8;
1367 int bits_to_copy = 8 - bit_shift;
1368 u32 value = 0;
1369 u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1370
1371 while (n > 0) {
1372 value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1373 n -= bits_to_copy;
1374 bit_nr += bits_to_copy;
1375 bits_to_copy = 8;
1376 bit_shift = 0;
1377 idx++;
1378 }
1379
1380 return value & mask;
1381}
1382
1383u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1384 unsigned offset, unsigned n)
1385{
1386 if (n > 32) {
1387 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1388 __func__, n, current->comm);
1389 n = 32;
1390 }
1391
1392 return __extract(report, offset, n);
1393}
1394EXPORT_SYMBOL_GPL(hid_field_extract);
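
/*
 * Worked example (illustrative): extracting an 8-bit field starting at bit
 * offset 4 of the little endian report {0x34, 0x12}:
 *
 *	hid_field_extract(hid, report, 4, 8) == 0x23
 *
 * The high nibble of byte 0 becomes the low nibble of the result, the low
 * nibble of byte 1 becomes the high nibble.
 */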
1395
1396/*
1397 * "implement" : set bits in a little endian bit stream.
1398 * Same concepts as "extract" (see comments above).
1399 * The data mangled in the bit stream remains in little endian
1400 * order the whole time. It makes more sense to talk about
1401 * endianness of register values by considering a register
1402 * a "cached" copy of the little endian bit stream.
1403 */
1404
1405static void __implement(u8 *report, unsigned offset, int n, u32 value)
1406{
1407 unsigned int idx = offset / 8;
1408 unsigned int bit_shift = offset % 8;
1409 int bits_to_set = 8 - bit_shift;
1410
1411 while (n - bits_to_set >= 0) {
1412 report[idx] &= ~(0xff << bit_shift);
1413 report[idx] |= value << bit_shift;
1414 value >>= bits_to_set;
1415 n -= bits_to_set;
1416 bits_to_set = 8;
1417 bit_shift = 0;
1418 idx++;
1419 }
1420
1421 /* remaining bits (less than a byte) */
1422 if (n) {
1423 u8 bit_mask = ((1U << n) - 1);
1424 report[idx] &= ~(bit_mask << bit_shift);
1425 report[idx] |= value << bit_shift;
1426 }
1427}
1428
1429static void implement(const struct hid_device *hid, u8 *report,
1430 unsigned offset, unsigned n, u32 value)
1431{
1432 if (unlikely(n > 32)) {
1433 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1434 __func__, n, current->comm);
1435 n = 32;
1436 } else if (n < 32) {
1437 u32 m = (1U << n) - 1;
1438
1439 if (unlikely(value > m)) {
1440 hid_warn(hid,
1441 "%s() called with too large value %d (n: %d)! (%s)\n",
1442 __func__, value, n, current->comm);
1443 WARN_ON(1);
1444 value &= m;
1445 }
1446 }
1447
1448 __implement(report, offset, n, value);
1449}
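
/*
 * Worked example (illustrative), the inverse of the extract example above:
 * writing 0x23 into an 8-bit field at bit offset 4 of a zeroed two-byte
 * buffer:
 *
 *	implement(hid, buf, 4, 8, 0x23);	-> buf[] == { 0x30, 0x02 }
 */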
1450
1451/*
1452 * Search an array for a value.
1453 */
1454
1455static int search(__s32 *array, __s32 value, unsigned n)
1456{
1457 while (n--) {
1458 if (*array++ == value)
1459 return 0;
1460 }
1461 return -1;
1462}
1463
1464/**
1465 * hid_match_report - check if driver's raw_event should be called
1466 *
1467 * @hid: hid device
1468 * @report: hid report to match against
1469 *
1470 * compare hid->driver->report_table->report_type to report->type
1471 */
1472static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1473{
1474 const struct hid_report_id *id = hid->driver->report_table;
1475
1476 if (!id) /* NULL means all */
1477 return 1;
1478
1479 for (; id->report_type != HID_TERMINATOR; id++)
1480 if (id->report_type == HID_ANY_ID ||
1481 id->report_type == report->type)
1482 return 1;
1483 return 0;
1484}
1485
1486/**
1487 * hid_match_usage - check if driver's event should be called
1488 *
1489 * @hid: hid device
1490 * @usage: usage to match against
1491 *
1492 * compare hid->driver->usage_table->usage_{type,code} to
1493 * usage->usage_{type,code}
1494 */
1495static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1496{
1497 const struct hid_usage_id *id = hid->driver->usage_table;
1498
1499 if (!id) /* NULL means all */
1500 return 1;
1501
1502 for (; id->usage_type != HID_ANY_ID - 1; id++)
1503 if ((id->usage_hid == HID_ANY_ID ||
1504 id->usage_hid == usage->hid) &&
1505 (id->usage_type == HID_ANY_ID ||
1506 id->usage_type == usage->type) &&
1507 (id->usage_code == HID_ANY_ID ||
1508 id->usage_code == usage->code))
1509 return 1;
1510 return 0;
1511}
1512
1513static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1514 struct hid_usage *usage, __s32 value, int interrupt)
1515{
1516 struct hid_driver *hdrv = hid->driver;
1517 int ret;
1518
1519 if (!list_empty(&hid->debug_list))
1520 hid_dump_input(hid, usage, value);
1521
1522 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1523 ret = hdrv->event(hid, field, usage, value);
1524 if (ret != 0) {
1525 if (ret < 0)
1526 hid_err(hid, "%s's event failed with %d\n",
1527 hdrv->name, ret);
1528 return;
1529 }
1530 }
1531
1532 if (hid->claimed & HID_CLAIMED_INPUT)
1533 hidinput_hid_event(hid, field, usage, value);
1534 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1535 hid->hiddev_hid_event(hid, field, usage, value);
1536}
1537
1538/*
1539 * Checks if the given value is valid within this field
1540 */
1541static inline int hid_array_value_is_valid(struct hid_field *field,
1542 __s32 value)
1543{
1544 __s32 min = field->logical_minimum;
1545
1546 /*
1547 * Value needs to be between logical min and max, and
1548 * (value - min) is used as an index in the usage array.
1549 * This array is of size field->maxusage
1550 */
1551 return value >= min &&
1552 value <= field->logical_maximum &&
1553 value - min < field->maxusage;
1554}
1555
1556/*
1557 * Fetch the field from the data. The field content is stored for next
1558 * report processing (we do differential reporting to the layer).
1559 */
1560static void hid_input_fetch_field(struct hid_device *hid,
1561 struct hid_field *field,
1562 __u8 *data)
1563{
1564 unsigned n;
1565 unsigned count = field->report_count;
1566 unsigned offset = field->report_offset;
1567 unsigned size = field->report_size;
1568 __s32 min = field->logical_minimum;
1569 __s32 *value;
1570
1571 value = field->new_value;
1572 memset(value, 0, count * sizeof(__s32));
1573 field->ignored = false;
1574
1575 for (n = 0; n < count; n++) {
1576
1577 value[n] = min < 0 ?
1578 snto32(hid_field_extract(hid, data, offset + n * size,
1579 size), size) :
1580 hid_field_extract(hid, data, offset + n * size, size);
1581
1582 /* Ignore report if ErrorRollOver */
1583 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1584 hid_array_value_is_valid(field, value[n]) &&
1585 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1586 field->ignored = true;
1587 return;
1588 }
1589 }
1590}
1591
1592/*
1593 * Process a received variable field.
1594 */
1595
1596static void hid_input_var_field(struct hid_device *hid,
1597 struct hid_field *field,
1598 int interrupt)
1599{
1600 unsigned int count = field->report_count;
1601 __s32 *value = field->new_value;
1602 unsigned int n;
1603
1604 for (n = 0; n < count; n++)
1605 hid_process_event(hid,
1606 field,
1607 &field->usage[n],
1608 value[n],
1609 interrupt);
1610
1611 memcpy(field->value, value, count * sizeof(__s32));
1612}
1613
1614/*
1615 * Process a received array field. The field content is stored for
1616 * next report processing (we do differential reporting to the layer).
1617 */
1618
1619static void hid_input_array_field(struct hid_device *hid,
1620 struct hid_field *field,
1621 int interrupt)
1622{
1623 unsigned int n;
1624 unsigned int count = field->report_count;
1625 __s32 min = field->logical_minimum;
1626 __s32 *value;
1627
1628 value = field->new_value;
1629
1630 /* ErrorRollOver */
1631 if (field->ignored)
1632 return;
1633
1634 for (n = 0; n < count; n++) {
1635 if (hid_array_value_is_valid(field, field->value[n]) &&
1636 search(value, field->value[n], count))
1637 hid_process_event(hid,
1638 field,
1639 &field->usage[field->value[n] - min],
1640 0,
1641 interrupt);
1642
1643 if (hid_array_value_is_valid(field, value[n]) &&
1644 search(field->value, value[n], count))
1645 hid_process_event(hid,
1646 field,
1647 &field->usage[value[n] - min],
1648 1,
1649 interrupt);
1650 }
1651
1652 memcpy(field->value, value, count * sizeof(__s32));
1653}
1654
1655/*
1656 * Analyse a received report, and fetch the data from it. The field
1657 * content is stored for next report processing (we do differential
1658 * reporting to the layer).
1659 */
1660static void hid_process_report(struct hid_device *hid,
1661 struct hid_report *report,
1662 __u8 *data,
1663 int interrupt)
1664{
1665 unsigned int a;
1666 struct hid_field_entry *entry;
1667 struct hid_field *field;
1668
1669 /* first retrieve all incoming values in data */
1670 for (a = 0; a < report->maxfield; a++)
1671 hid_input_fetch_field(hid, report->field[a], data);
1672
1673 if (!list_empty(&report->field_entry_list)) {
1674 /* INPUT_REPORT, we have a priority list of fields */
1675 list_for_each_entry(entry,
1676 &report->field_entry_list,
1677 list) {
1678 field = entry->field;
1679
1680 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1681 hid_process_event(hid,
1682 field,
1683 &field->usage[entry->index],
1684 field->new_value[entry->index],
1685 interrupt);
1686 else
1687 hid_input_array_field(hid, field, interrupt);
1688 }
1689
1690 /* we need to do the memcpy at the end for var items */
1691 for (a = 0; a < report->maxfield; a++) {
1692 field = report->field[a];
1693
1694 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1695 memcpy(field->value, field->new_value,
1696 field->report_count * sizeof(__s32));
1697 }
1698 } else {
1699 /* FEATURE_REPORT, regular processing */
1700 for (a = 0; a < report->maxfield; a++) {
1701 field = report->field[a];
1702
1703 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1704 hid_input_var_field(hid, field, interrupt);
1705 else
1706 hid_input_array_field(hid, field, interrupt);
1707 }
1708 }
1709}
1710
1711/*
1712 * Insert a given usage_index in a field in the list
1713 * of processed usages in the report.
1714 *
1715 * The elements of lower priority score are processed
1716 * first.
1717 */
1718static void __hid_insert_field_entry(struct hid_device *hid,
1719 struct hid_report *report,
1720 struct hid_field_entry *entry,
1721 struct hid_field *field,
1722 unsigned int usage_index)
1723{
1724 struct hid_field_entry *next;
1725
1726 entry->field = field;
1727 entry->index = usage_index;
1728 entry->priority = field->usages_priorities[usage_index];
1729
1730 /* insert the element at the correct position */
1731 list_for_each_entry(next,
1732 &report->field_entry_list,
1733 list) {
1734 /*
1735 * the priority of our element is strictly higher
1736 * than the next one, insert it before
1737 */
1738 if (entry->priority > next->priority) {
1739 list_add_tail(&entry->list, &next->list);
1740 return;
1741 }
1742 }
1743
1744 /* lowest priority score: insert at the end */
1745 list_add_tail(&entry->list, &report->field_entry_list);
1746}
1747
1748static void hid_report_process_ordering(struct hid_device *hid,
1749 struct hid_report *report)
1750{
1751 struct hid_field *field;
1752 struct hid_field_entry *entries;
1753 unsigned int a, u, usages;
1754 unsigned int count = 0;
1755
1756 /* count the number of individual fields in the report */
1757 for (a = 0; a < report->maxfield; a++) {
1758 field = report->field[a];
1759
1760 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1761 count += field->report_count;
1762 else
1763 count++;
1764 }
1765
1766 /* allocate the memory to process the fields */
1767 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1768 if (!entries)
1769 return;
1770
1771 report->field_entries = entries;
1772
1773 /*
1774 * walk through all fields in the report and
1775 * store them by priority order in report->field_entry_list
1776 *
1777 * - Var elements are individualized (field + usage_index)
1778 * - Arrays are taken as one, we can not chose an order for them
1779 */
1780 usages = 0;
1781 for (a = 0; a < report->maxfield; a++) {
1782 field = report->field[a];
1783
1784 if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1785 for (u = 0; u < field->report_count; u++) {
1786 __hid_insert_field_entry(hid, report,
1787 &entries[usages],
1788 field, u);
1789 usages++;
1790 }
1791 } else {
1792 __hid_insert_field_entry(hid, report, &entries[usages],
1793 field, 0);
1794 usages++;
1795 }
1796 }
1797}
1798
1799static void hid_process_ordering(struct hid_device *hid)
1800{
1801 struct hid_report *report;
1802 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1803
1804 list_for_each_entry(report, &report_enum->report_list, list)
1805 hid_report_process_ordering(hid, report);
1806}
1807
1808/*
1809 * Output the field into the report.
1810 */
1811
1812static void hid_output_field(const struct hid_device *hid,
1813 struct hid_field *field, __u8 *data)
1814{
1815 unsigned count = field->report_count;
1816 unsigned offset = field->report_offset;
1817 unsigned size = field->report_size;
1818 unsigned n;
1819
1820 for (n = 0; n < count; n++) {
1821 if (field->logical_minimum < 0) /* signed values */
1822 implement(hid, data, offset + n * size, size,
1823 s32ton(field->value[n], size));
1824 else /* unsigned values */
1825 implement(hid, data, offset + n * size, size,
1826 field->value[n]);
1827 }
1828}
1829
1830/*
1831 * Compute the size of a report.
1832 */
1833static size_t hid_compute_report_size(struct hid_report *report)
1834{
1835 if (report->size)
1836 return ((report->size - 1) >> 3) + 1;
1837
1838 return 0;
1839}
1840
1841/*
1842 * Create a report. 'data' has to be allocated using
1843 * hid_alloc_report_buf() so that it has proper size.
1844 */
1845
1846void hid_output_report(struct hid_report *report, __u8 *data)
1847{
1848 unsigned n;
1849
1850 if (report->id > 0)
1851 *data++ = report->id;
1852
1853 memset(data, 0, hid_compute_report_size(report));
1854 for (n = 0; n < report->maxfield; n++)
1855 hid_output_field(report->device, report->field[n], data);
1856}
1857EXPORT_SYMBOL_GPL(hid_output_report);
1858
1859/*
1860 * Allocator for buffer that is going to be passed to hid_output_report()
1861 */
1862u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1863{
1864 /*
1865 * 7 extra bytes are necessary to achieve proper functionality
1866 * of implement() working on 8 byte chunks
1867 */
1868
1869 u32 len = hid_report_len(report) + 7;
1870
1871 return kmalloc(len, flags);
1872}
1873EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
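
/*
 * Minimal usage sketch (hypothetical caller): building a raw buffer for a
 * report and handing it to the transport. The GFP flag and the SET_REPORT
 * request type are example choices only.
 *
 *	buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	hid_output_report(report, buf);
 *	ret = hid_hw_raw_request(hdev, report->id, buf, hid_report_len(report),
 *				 report->type, HID_REQ_SET_REPORT);
 *	kfree(buf);
 */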
1874
1875/*
1876 * Set a field value. The report this field belongs to has to be
1877 * created and transferred to the device, to set this value in the
1878 * device.
1879 */
1880
1881int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1882{
1883 unsigned size;
1884
1885 if (!field)
1886 return -1;
1887
1888 size = field->report_size;
1889
1890 hid_dump_input(field->report->device, field->usage + offset, value);
1891
1892 if (offset >= field->report_count) {
1893 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1894 offset, field->report_count);
1895 return -1;
1896 }
1897 if (field->logical_minimum < 0) {
1898 if (value != snto32(s32ton(value, size), size)) {
1899 hid_err(field->report->device, "value %d is out of range\n", value);
1900 return -1;
1901 }
1902 }
1903 field->value[offset] = value;
1904 return 0;
1905}
1906EXPORT_SYMBOL_GPL(hid_set_field);
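
/*
 * Minimal usage sketch (hypothetical driver code): updating one value of an
 * output report and asking the transport to send it. The report id, field
 * index and value are examples only.
 *
 *	report = hdev->report_enum[HID_OUTPUT_REPORT].report_id_hash[1];
 *	if (report && !hid_set_field(report->field[0], 0, 0x7f))
 *		hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
 */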
1907
1908static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1909 const u8 *data)
1910{
1911 struct hid_report *report;
1912 unsigned int n = 0; /* Normally report number is 0 */
1913
1914 /* Device uses numbered reports, data[0] is report number */
1915 if (report_enum->numbered)
1916 n = *data;
1917
1918 report = report_enum->report_id_hash[n];
1919 if (report == NULL)
1920 dbg_hid("undefined report_id %u received\n", n);
1921
1922 return report;
1923}
1924
1925/*
1926 * Implement a generic .request() callback, using .raw_request()
1927 * DO NOT USE in hid drivers directly; use hid_hw_request() instead.
1928 */
1929int __hid_request(struct hid_device *hid, struct hid_report *report,
1930 enum hid_class_request reqtype)
1931{
1932 char *buf;
1933 int ret;
1934 u32 len;
1935
1936 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1937 if (!buf)
1938 return -ENOMEM;
1939
1940 len = hid_report_len(report);
1941
1942 if (reqtype == HID_REQ_SET_REPORT)
1943 hid_output_report(report, buf);
1944
1945 ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
1946 report->type, reqtype);
1947 if (ret < 0) {
1948 dbg_hid("unable to complete request: %d\n", ret);
1949 goto out;
1950 }
1951
1952 if (reqtype == HID_REQ_GET_REPORT)
1953 hid_input_report(hid, report->type, buf, ret, 0);
1954
1955 ret = 0;
1956
1957out:
1958 kfree(buf);
1959 return ret;
1960}
1961EXPORT_SYMBOL_GPL(__hid_request);
1962
1963int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
1964 int interrupt)
1965{
1966 struct hid_report_enum *report_enum = hid->report_enum + type;
1967 struct hid_report *report;
1968 struct hid_driver *hdrv;
1969 u32 rsize, csize = size;
1970 u8 *cdata = data;
1971 int ret = 0;
1972
1973 report = hid_get_report(report_enum, data);
1974 if (!report)
1975 goto out;
1976
1977 if (report_enum->numbered) {
1978 cdata++;
1979 csize--;
1980 }
1981
1982 rsize = hid_compute_report_size(report);
1983
1984 if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
1985 rsize = HID_MAX_BUFFER_SIZE - 1;
1986 else if (rsize > HID_MAX_BUFFER_SIZE)
1987 rsize = HID_MAX_BUFFER_SIZE;
1988
1989 if (csize < rsize) {
1990 dbg_hid("report %d is too short, (%d < %d)\n", report->id,
1991 csize, rsize);
1992 memset(cdata + csize, 0, rsize - csize);
1993 }
1994
1995 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
1996 hid->hiddev_report_event(hid, report);
1997 if (hid->claimed & HID_CLAIMED_HIDRAW) {
1998 ret = hidraw_report_event(hid, data, size);
1999 if (ret)
2000 goto out;
2001 }
2002
2003 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2004 hid_process_report(hid, report, cdata, interrupt);
2005 hdrv = hid->driver;
2006 if (hdrv && hdrv->report)
2007 hdrv->report(hid, report);
2008 }
2009
2010 if (hid->claimed & HID_CLAIMED_INPUT)
2011 hidinput_report_event(hid, report);
2012out:
2013 return ret;
2014}
2015EXPORT_SYMBOL_GPL(hid_report_raw_event);
2016
2017/**
2018 * hid_input_report - report data from lower layer (usb, bt...)
2019 *
2020 * @hid: hid device
2021 * @type: HID report type (HID_*_REPORT)
2022 * @data: report contents
2023 * @size: size of data parameter
2024 * @interrupt: distinguish between interrupt and control transfers
2025 *
2026 * This is the data entry point for lower layers.
2027 */
2028int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2029 int interrupt)
2030{
2031 struct hid_report_enum *report_enum;
2032 struct hid_driver *hdrv;
2033 struct hid_report *report;
2034 int ret = 0;
2035
2036 if (!hid)
2037 return -ENODEV;
2038
2039 if (down_trylock(&hid->driver_input_lock))
2040 return -EBUSY;
2041
2042 if (!hid->driver) {
2043 ret = -ENODEV;
2044 goto unlock;
2045 }
2046 report_enum = hid->report_enum + type;
2047 hdrv = hid->driver;
2048
2049 if (!size) {
2050 dbg_hid("empty report\n");
2051 ret = -1;
2052 goto unlock;
2053 }
2054
2055 /* Avoid unnecessary overhead if debugfs is disabled */
2056 if (!list_empty(&hid->debug_list))
2057 hid_dump_report(hid, type, data, size);
2058
2059 report = hid_get_report(report_enum, data);
2060
2061 if (!report) {
2062 ret = -1;
2063 goto unlock;
2064 }
2065
2066 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2067 ret = hdrv->raw_event(hid, report, data, size);
2068 if (ret < 0)
2069 goto unlock;
2070 }
2071
2072 ret = hid_report_raw_event(hid, type, data, size, interrupt);
2073
2074unlock:
2075 up(&hid->driver_input_lock);
2076 return ret;
2077}
2078EXPORT_SYMBOL_GPL(hid_input_report);
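/*
 * Illustrative sketch of how a transport (ll) driver feeds data in; the
 * completion handler and buffer names are assumptions, the call itself
 * matches the API above. The last argument marks the data as coming from an
 * interrupt/in endpoint rather than a control transfer.
 *
 *	static void my_ll_irq_complete(struct hid_device *hid, u8 *buf, u32 len)
 *	{
 *		hid_input_report(hid, HID_INPUT_REPORT, buf, len, 1);
 *	}
 */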
2079
2080bool hid_match_one_id(const struct hid_device *hdev,
2081 const struct hid_device_id *id)
2082{
2083 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2084 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2085 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2086 (id->product == HID_ANY_ID || id->product == hdev->product);
2087}
2088
2089const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2090 const struct hid_device_id *id)
2091{
2092 for (; id->bus; id++)
2093 if (hid_match_one_id(hdev, id))
2094 return id;
2095
2096 return NULL;
2097}
2098EXPORT_SYMBOL_GPL(hid_match_id);
2099
2100static const struct hid_device_id hid_hiddev_list[] = {
2101 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2102 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2103 { }
2104};
2105
2106static bool hid_hiddev(struct hid_device *hdev)
2107{
2108 return !!hid_match_id(hdev, hid_hiddev_list);
2109}
2110
2111
2112static ssize_t
2113read_report_descriptor(struct file *filp, struct kobject *kobj,
2114 struct bin_attribute *attr,
2115 char *buf, loff_t off, size_t count)
2116{
2117 struct device *dev = kobj_to_dev(kobj);
2118 struct hid_device *hdev = to_hid_device(dev);
2119
2120 if (off >= hdev->rsize)
2121 return 0;
2122
2123 if (off + count > hdev->rsize)
2124 count = hdev->rsize - off;
2125
2126 memcpy(buf, hdev->rdesc + off, count);
2127
2128 return count;
2129}
2130
2131static ssize_t
2132show_country(struct device *dev, struct device_attribute *attr,
2133 char *buf)
2134{
2135 struct hid_device *hdev = to_hid_device(dev);
2136
2137 return sprintf(buf, "%02x\n", hdev->country & 0xff);
2138}
2139
2140static struct bin_attribute dev_bin_attr_report_desc = {
2141 .attr = { .name = "report_descriptor", .mode = 0444 },
2142 .read = read_report_descriptor,
2143 .size = HID_MAX_DESCRIPTOR_SIZE,
2144};
2145
2146static const struct device_attribute dev_attr_country = {
2147 .attr = { .name = "country", .mode = 0444 },
2148 .show = show_country,
2149};
2150
2151int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2152{
2153 static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2154 "Joystick", "Gamepad", "Keyboard", "Keypad",
2155 "Multi-Axis Controller"
2156 };
2157 const char *type, *bus;
2158 char buf[64] = "";
2159 unsigned int i;
2160 int len;
2161 int ret;
2162
2163 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2164 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2165 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2166 connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2167 if (hdev->bus != BUS_USB)
2168 connect_mask &= ~HID_CONNECT_HIDDEV;
2169 if (hid_hiddev(hdev))
2170 connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2171
2172 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2173 connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2174 hdev->claimed |= HID_CLAIMED_INPUT;
2175
2176 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2177 !hdev->hiddev_connect(hdev,
2178 connect_mask & HID_CONNECT_HIDDEV_FORCE))
2179 hdev->claimed |= HID_CLAIMED_HIDDEV;
2180 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2181 hdev->claimed |= HID_CLAIMED_HIDRAW;
2182
2183 if (connect_mask & HID_CONNECT_DRIVER)
2184 hdev->claimed |= HID_CLAIMED_DRIVER;
2185
2186 /* Drivers with the ->raw_event callback set are not required to connect
2187 * to any other listener. */
2188 if (!hdev->claimed && !hdev->driver->raw_event) {
2189 hid_err(hdev, "device has no listeners, quitting\n");
2190 return -ENODEV;
2191 }
2192
2193 hid_process_ordering(hdev);
2194
2195 if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2196 (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2197 hdev->ff_init(hdev);
2198
2199 len = 0;
2200 if (hdev->claimed & HID_CLAIMED_INPUT)
2201 len += sprintf(buf + len, "input");
2202 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2203 len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2204 ((struct hiddev *)hdev->hiddev)->minor);
2205 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2206 len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2207 ((struct hidraw *)hdev->hidraw)->minor);
2208
2209 type = "Device";
2210 for (i = 0; i < hdev->maxcollection; i++) {
2211 struct hid_collection *col = &hdev->collection[i];
2212 if (col->type == HID_COLLECTION_APPLICATION &&
2213 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2214 (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2215 type = types[col->usage & 0xffff];
2216 break;
2217 }
2218 }
2219
2220 switch (hdev->bus) {
2221 case BUS_USB:
2222 bus = "USB";
2223 break;
2224 case BUS_BLUETOOTH:
2225 bus = "BLUETOOTH";
2226 break;
2227 case BUS_I2C:
2228 bus = "I2C";
2229 break;
2230 case BUS_VIRTUAL:
2231 bus = "VIRTUAL";
2232 break;
2233 case BUS_INTEL_ISHTP:
2234 case BUS_AMD_SFH:
2235 bus = "SENSOR HUB";
2236 break;
2237 default:
2238 bus = "<UNKNOWN>";
2239 }
2240
2241 ret = device_create_file(&hdev->dev, &dev_attr_country);
2242 if (ret)
2243 hid_warn(hdev,
2244 "can't create sysfs country code attribute err: %d\n", ret);
2245
2246 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2247 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2248 type, hdev->name, hdev->phys);
2249
2250 return 0;
2251}
2252EXPORT_SYMBOL_GPL(hid_connect);
2253
2254void hid_disconnect(struct hid_device *hdev)
2255{
2256 device_remove_file(&hdev->dev, &dev_attr_country);
2257 if (hdev->claimed & HID_CLAIMED_INPUT)
2258 hidinput_disconnect(hdev);
2259 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2260 hdev->hiddev_disconnect(hdev);
2261 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2262 hidraw_disconnect(hdev);
2263 hdev->claimed = 0;
2264}
2265EXPORT_SYMBOL_GPL(hid_disconnect);
2266
2267/**
2268 * hid_hw_start - start underlying HW
2269 * @hdev: hid device
2270 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2271 *
2272 * Call this in the probe function *after* hid_parse(). This will set up HW
2273 * buffers and start the device (if not deferred to device open).
2274 * hid_hw_stop must be called if this was successful.
2275 */
2276int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2277{
2278 int error;
2279
2280 error = hdev->ll_driver->start(hdev);
2281 if (error)
2282 return error;
2283
2284 if (connect_mask) {
2285 error = hid_connect(hdev, connect_mask);
2286 if (error) {
2287 hdev->ll_driver->stop(hdev);
2288 return error;
2289 }
2290 }
2291
2292 return 0;
2293}
2294EXPORT_SYMBOL_GPL(hid_hw_start);
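/*
 * Illustrative probe sketch for a HID driver (the function name is an
 * assumption): parse the report descriptor first, then start the HW.
 *
 *	static int my_probe(struct hid_device *hdev, const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 *
 * The matching remove callback would call hid_hw_stop(hdev).
 */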
2295
2296/**
2297 * hid_hw_stop - stop underlying HW
2298 * @hdev: hid device
2299 *
2300 * This is usually called from the remove function, or from probe when
2301 * something failed after hid_hw_start() had already been called.
2302 */
2303void hid_hw_stop(struct hid_device *hdev)
2304{
2305 hid_disconnect(hdev);
2306 hdev->ll_driver->stop(hdev);
2307}
2308EXPORT_SYMBOL_GPL(hid_hw_stop);
2309
2310/**
2311 * hid_hw_open - signal underlying HW to start delivering events
2312 * @hdev: hid device
2313 *
2314 * Tell underlying HW to start delivering events from the device.
2315 * This function should be called after a successful call
2316 * to hid_hw_start().
2317 */
2318int hid_hw_open(struct hid_device *hdev)
2319{
2320 int ret;
2321
2322 ret = mutex_lock_killable(&hdev->ll_open_lock);
2323 if (ret)
2324 return ret;
2325
2326 if (!hdev->ll_open_count++) {
2327 ret = hdev->ll_driver->open(hdev);
2328 if (ret)
2329 hdev->ll_open_count--;
2330 }
2331
2332 mutex_unlock(&hdev->ll_open_lock);
2333 return ret;
2334}
2335EXPORT_SYMBOL_GPL(hid_hw_open);
2336
2337/**
2338 * hid_hw_close - signal underlying HW to stop delivering events
2339 *
2340 * @hdev: hid device
2341 *
2342 * This function indicates that we are not interested in the events
2343 * from this device anymore. Delivery of events may or may not stop,
2344 * depending on the number of users still outstanding.
2345 */
2346void hid_hw_close(struct hid_device *hdev)
2347{
2348 mutex_lock(&hdev->ll_open_lock);
2349 if (!--hdev->ll_open_count)
2350 hdev->ll_driver->close(hdev);
2351 mutex_unlock(&hdev->ll_open_lock);
2352}
2353EXPORT_SYMBOL_GPL(hid_hw_close);
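/*
 * Illustrative sketch: drivers usually pair hid_hw_open()/hid_hw_close() in
 * their input device open/close callbacks, so events are only delivered
 * while a user holds the device open. Callback names are assumptions.
 *
 *	static int my_input_open(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		return hid_hw_open(hdev);
 *	}
 *
 *	static void my_input_close(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		hid_hw_close(hdev);
 *	}
 */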
2354
2355/**
2356 * hid_hw_request - send report request to device
2357 *
2358 * @hdev: hid device
2359 * @report: report to send
2360 * @reqtype: hid request type
2361 */
2362void hid_hw_request(struct hid_device *hdev,
2363 struct hid_report *report, enum hid_class_request reqtype)
2364{
2365 if (hdev->ll_driver->request)
2366 return hdev->ll_driver->request(hdev, report, reqtype);
2367
2368 __hid_request(hdev, report, reqtype);
2369}
2370EXPORT_SYMBOL_GPL(hid_hw_request);
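/*
 * Illustrative sketch: request a feature report from the device; once the
 * transfer completes, the parsed values land in report->field[]->value[].
 * The report ID used here is an assumption.
 *
 *	struct hid_report *report;
 *
 *	report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[2];
 *	if (report)
 *		hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
 */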
2371
2372/**
2373 * hid_hw_raw_request - send report request to device
2374 *
2375 * @hdev: hid device
2376 * @reportnum: report ID
2377 * @buf: in/out data to transfer
2378 * @len: length of buf
2379 * @rtype: HID report type
2380 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2381 *
2382 * Return: count of data transferred, negative if error
2383 *
2384 * Same behavior as hid_hw_request, but with raw buffers instead.
2385 */
2386int hid_hw_raw_request(struct hid_device *hdev,
2387 unsigned char reportnum, __u8 *buf,
2388 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2389{
2390 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
2391 return -EINVAL;
2392
2393 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2394 rtype, reqtype);
2395}
2396EXPORT_SYMBOL_GPL(hid_hw_raw_request);
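/*
 * Illustrative sketch (buffer size and report ID are assumptions): fetch a
 * feature report as raw bytes; on devices with numbered reports buf[0]
 * holds the report ID.
 *
 *	u8 *buf = kzalloc(32, GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_raw_request(hdev, 0x05, buf, 32,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	if (ret < 0)
 *		hid_err(hdev, "failed to read feature report: %d\n", ret);
 *	kfree(buf);
 */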
2397
2398/**
2399 * hid_hw_output_report - send output report to device
2400 *
2401 * @hdev: hid device
2402 * @buf: raw data to transfer
2403 * @len: length of buf
2404 *
2405 * Return: count of data transferred, negative if error
2406 */
2407int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2408{
2409 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf)
2410 return -EINVAL;
2411
2412 if (hdev->ll_driver->output_report)
2413 return hdev->ll_driver->output_report(hdev, buf, len);
2414
2415 return -ENOSYS;
2416}
2417EXPORT_SYMBOL_GPL(hid_hw_output_report);
2418
2419#ifdef CONFIG_PM
2420int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2421{
2422 if (hdev->driver && hdev->driver->suspend)
2423 return hdev->driver->suspend(hdev, state);
2424
2425 return 0;
2426}
2427EXPORT_SYMBOL_GPL(hid_driver_suspend);
2428
2429int hid_driver_reset_resume(struct hid_device *hdev)
2430{
2431 if (hdev->driver && hdev->driver->reset_resume)
2432 return hdev->driver->reset_resume(hdev);
2433
2434 return 0;
2435}
2436EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2437
2438int hid_driver_resume(struct hid_device *hdev)
2439{
2440 if (hdev->driver && hdev->driver->resume)
2441 return hdev->driver->resume(hdev);
2442
2443 return 0;
2444}
2445EXPORT_SYMBOL_GPL(hid_driver_resume);
2446#endif /* CONFIG_PM */
2447
2448struct hid_dynid {
2449 struct list_head list;
2450 struct hid_device_id id;
2451};
2452
2453/**
2454 * new_id_store - add a new HID device ID to this driver and re-probe devices
2455 * @drv: target device driver
2456 * @buf: buffer for scanning device ID data
2457 * @count: input size
2458 *
2459 * Adds a new dynamic hid device ID to this driver,
2460 * and causes the driver to probe for all devices again.
2461 */
2462static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2463 size_t count)
2464{
2465 struct hid_driver *hdrv = to_hid_driver(drv);
2466 struct hid_dynid *dynid;
2467 __u32 bus, vendor, product;
2468 unsigned long driver_data = 0;
2469 int ret;
2470
2471 ret = sscanf(buf, "%x %x %x %lx",
2472 &bus, &vendor, &product, &driver_data);
2473 if (ret < 3)
2474 return -EINVAL;
2475
2476 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2477 if (!dynid)
2478 return -ENOMEM;
2479
2480 dynid->id.bus = bus;
2481 dynid->id.group = HID_GROUP_ANY;
2482 dynid->id.vendor = vendor;
2483 dynid->id.product = product;
2484 dynid->id.driver_data = driver_data;
2485
2486 spin_lock(&hdrv->dyn_lock);
2487 list_add_tail(&dynid->list, &hdrv->dyn_list);
2488 spin_unlock(&hdrv->dyn_lock);
2489
2490 ret = driver_attach(&hdrv->driver);
2491
2492 return ret ? : count;
2493}
2494static DRIVER_ATTR_WO(new_id);
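/*
 * Illustrative use of the sysfs interface above (driver name and IDs are
 * made up): the fields are bus, vendor and product in hex, with an optional
 * driver_data value, e.g.
 *
 *	echo "0003 1234 5678" > /sys/bus/hid/drivers/foo/new_id
 */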
2495
2496static struct attribute *hid_drv_attrs[] = {
2497 &driver_attr_new_id.attr,
2498 NULL,
2499};
2500ATTRIBUTE_GROUPS(hid_drv);
2501
2502static void hid_free_dynids(struct hid_driver *hdrv)
2503{
2504 struct hid_dynid *dynid, *n;
2505
2506 spin_lock(&hdrv->dyn_lock);
2507 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2508 list_del(&dynid->list);
2509 kfree(dynid);
2510 }
2511 spin_unlock(&hdrv->dyn_lock);
2512}
2513
2514const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2515 struct hid_driver *hdrv)
2516{
2517 struct hid_dynid *dynid;
2518
2519 spin_lock(&hdrv->dyn_lock);
2520 list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2521 if (hid_match_one_id(hdev, &dynid->id)) {
2522 spin_unlock(&hdrv->dyn_lock);
2523 return &dynid->id;
2524 }
2525 }
2526 spin_unlock(&hdrv->dyn_lock);
2527
2528 return hid_match_id(hdev, hdrv->id_table);
2529}
2530EXPORT_SYMBOL_GPL(hid_match_device);
2531
2532static int hid_bus_match(struct device *dev, struct device_driver *drv)
2533{
2534 struct hid_driver *hdrv = to_hid_driver(drv);
2535 struct hid_device *hdev = to_hid_device(dev);
2536
2537 return hid_match_device(hdev, hdrv) != NULL;
2538}
2539
2540/**
2541 * hid_compare_device_paths - check if both devices share the same path
2542 * @hdev_a: hid device
2543 * @hdev_b: hid device
2544 * @separator: char to use as separator
2545 *
2546 * Check if two devices share the same path up to the last occurrence of
2547 * the separator char. Both paths must exist (i.e., zero-length paths
2548 * don't match).
2549 */
2550bool hid_compare_device_paths(struct hid_device *hdev_a,
2551 struct hid_device *hdev_b, char separator)
2552{
2553 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2554 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2555
2556 if (n1 != n2 || n1 <= 0 || n2 <= 0)
2557 return false;
2558
2559 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2560}
2561EXPORT_SYMBOL_GPL(hid_compare_device_paths);
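/*
 * Illustrative example (phys strings are made up): with '/' as the
 * separator, "usb-0000:00:14.0-1/input0" and "usb-0000:00:14.0-1/input1"
 * compare equal, while "usb-0000:00:14.0-1/input0" and
 * "usb-0000:00:14.0-2/input0" do not.
 */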
2562
2563static int hid_device_probe(struct device *dev)
2564{
2565 struct hid_driver *hdrv = to_hid_driver(dev->driver);
2566 struct hid_device *hdev = to_hid_device(dev);
2567 const struct hid_device_id *id;
2568 int ret = 0;
2569
2570 if (down_interruptible(&hdev->driver_input_lock)) {
2571 ret = -EINTR;
2572 goto end;
2573 }
2574 hdev->io_started = false;
2575
2576 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2577
2578 if (!hdev->driver) {
2579 id = hid_match_device(hdev, hdrv);
2580 if (id == NULL) {
2581 ret = -ENODEV;
2582 goto unlock;
2583 }
2584
2585 if (hdrv->match) {
2586 if (!hdrv->match(hdev, hid_ignore_special_drivers)) {
2587 ret = -ENODEV;
2588 goto unlock;
2589 }
2590 } else {
2591 /*
2592 * hid-generic implements .match(), so if
2593 * hid_ignore_special_drivers is set, we can safely
2594 * return.
2595 */
2596 if (hid_ignore_special_drivers) {
2597 ret = -ENODEV;
2598 goto unlock;
2599 }
2600 }
2601
2602	/* reset the quirks that have been previously set */
2603 hdev->quirks = hid_lookup_quirk(hdev);
2604 hdev->driver = hdrv;
2605 if (hdrv->probe) {
2606 ret = hdrv->probe(hdev, id);
2607 } else { /* default probe */
2608 ret = hid_open_report(hdev);
2609 if (!ret)
2610 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2611 }
2612 if (ret) {
2613 hid_close_report(hdev);
2614 hdev->driver = NULL;
2615 }
2616 }
2617unlock:
2618 if (!hdev->io_started)
2619 up(&hdev->driver_input_lock);
2620end:
2621 return ret;
2622}
2623
2624static void hid_device_remove(struct device *dev)
2625{
2626 struct hid_device *hdev = to_hid_device(dev);
2627 struct hid_driver *hdrv;
2628
2629 down(&hdev->driver_input_lock);
2630 hdev->io_started = false;
2631
2632 hdrv = hdev->driver;
2633 if (hdrv) {
2634 if (hdrv->remove)
2635 hdrv->remove(hdev);
2636 else /* default remove */
2637 hid_hw_stop(hdev);
2638 hid_close_report(hdev);
2639 hdev->driver = NULL;
2640 }
2641
2642 if (!hdev->io_started)
2643 up(&hdev->driver_input_lock);
2644}
2645
2646static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2647 char *buf)
2648{
2649 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2650
2651 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2652 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2653}
2654static DEVICE_ATTR_RO(modalias);
2655
2656static struct attribute *hid_dev_attrs[] = {
2657 &dev_attr_modalias.attr,
2658 NULL,
2659};
2660static struct bin_attribute *hid_dev_bin_attrs[] = {
2661 &dev_bin_attr_report_desc,
2662 NULL
2663};
2664static const struct attribute_group hid_dev_group = {
2665 .attrs = hid_dev_attrs,
2666 .bin_attrs = hid_dev_bin_attrs,
2667};
2668__ATTRIBUTE_GROUPS(hid_dev);
2669
2670static int hid_uevent(struct device *dev, struct kobj_uevent_env *env)
2671{
2672 struct hid_device *hdev = to_hid_device(dev);
2673
2674 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2675 hdev->bus, hdev->vendor, hdev->product))
2676 return -ENOMEM;
2677
2678 if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2679 return -ENOMEM;
2680
2681 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2682 return -ENOMEM;
2683
2684 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2685 return -ENOMEM;
2686
2687 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2688 hdev->bus, hdev->group, hdev->vendor, hdev->product))
2689 return -ENOMEM;
2690
2691 return 0;
2692}
2693
2694struct bus_type hid_bus_type = {
2695 .name = "hid",
2696 .dev_groups = hid_dev_groups,
2697 .drv_groups = hid_drv_groups,
2698 .match = hid_bus_match,
2699 .probe = hid_device_probe,
2700 .remove = hid_device_remove,
2701 .uevent = hid_uevent,
2702};
2703EXPORT_SYMBOL(hid_bus_type);
2704
2705int hid_add_device(struct hid_device *hdev)
2706{
2707 static atomic_t id = ATOMIC_INIT(0);
2708 int ret;
2709
2710 if (WARN_ON(hdev->status & HID_STAT_ADDED))
2711 return -EBUSY;
2712
2713 hdev->quirks = hid_lookup_quirk(hdev);
2714
2715	/* we need to kill them here, otherwise they will stay allocated,
2716	 * waiting for a matching driver */
2717 if (hid_ignore(hdev))
2718 return -ENODEV;
2719
2720 /*
2721 * Check for the mandatory transport channel.
2722 */
2723 if (!hdev->ll_driver->raw_request) {
2724 hid_err(hdev, "transport driver missing .raw_request()\n");
2725 return -EINVAL;
2726 }
2727
2728 /*
2729 * Read the device report descriptor once and use as template
2730 * for the driver-specific modifications.
2731 */
2732 ret = hdev->ll_driver->parse(hdev);
2733 if (ret)
2734 return ret;
2735 if (!hdev->dev_rdesc)
2736 return -ENODEV;
2737
2738 /*
2739 * Scan generic devices for group information
2740 */
2741 if (hid_ignore_special_drivers) {
2742 hdev->group = HID_GROUP_GENERIC;
2743 } else if (!hdev->group &&
2744 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2745 ret = hid_scan_report(hdev);
2746 if (ret)
2747 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2748 }
2749
2750 hdev->id = atomic_inc_return(&id);
2751
2752	/* XXX hack, is there any cleaner solution once the driver core
2753	 * is converted to allow more than 20 bytes as the device name? */
2754 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2755 hdev->vendor, hdev->product, hdev->id);
2756
2757 hid_debug_register(hdev, dev_name(&hdev->dev));
2758 ret = device_add(&hdev->dev);
2759 if (!ret)
2760 hdev->status |= HID_STAT_ADDED;
2761 else
2762 hid_debug_unregister(hdev);
2763
2764 return ret;
2765}
2766EXPORT_SYMBOL_GPL(hid_add_device);
2767
2768/**
2769 * hid_allocate_device - allocate new hid device descriptor
2770 *
2771 * Allocate and initialize a hid device, so that hid_destroy_device() can
2772 * later be used to free it.
2773 *
2774 * A new hid_device pointer is returned on success, otherwise an
2775 * ERR_PTR-encoded error value.
2776 */
2777struct hid_device *hid_allocate_device(void)
2778{
2779 struct hid_device *hdev;
2780 int ret = -ENOMEM;
2781
2782 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2783 if (hdev == NULL)
2784 return ERR_PTR(ret);
2785
2786 device_initialize(&hdev->dev);
2787 hdev->dev.release = hid_device_release;
2788 hdev->dev.bus = &hid_bus_type;
2789 device_enable_async_suspend(&hdev->dev);
2790
2791 hid_close_report(hdev);
2792
2793 init_waitqueue_head(&hdev->debug_wait);
2794 INIT_LIST_HEAD(&hdev->debug_list);
2795 spin_lock_init(&hdev->debug_list_lock);
2796 sema_init(&hdev->driver_input_lock, 1);
2797 mutex_init(&hdev->ll_open_lock);
2798
2799 return hdev;
2800}
2801EXPORT_SYMBOL_GPL(hid_allocate_device);
2802
2803static void hid_remove_device(struct hid_device *hdev)
2804{
2805 if (hdev->status & HID_STAT_ADDED) {
2806 device_del(&hdev->dev);
2807 hid_debug_unregister(hdev);
2808 hdev->status &= ~HID_STAT_ADDED;
2809 }
2810 kfree(hdev->dev_rdesc);
2811 hdev->dev_rdesc = NULL;
2812 hdev->dev_rsize = 0;
2813}
2814
2815/**
2816 * hid_destroy_device - free previously allocated device
2817 *
2818 * @hdev: hid device
2819 *
2820 * If you allocated the hid_device through hid_allocate_device(), you
2821 * should always free it with this function.
2822 */
2823void hid_destroy_device(struct hid_device *hdev)
2824{
2825 hid_remove_device(hdev);
2826 put_device(&hdev->dev);
2827}
2828EXPORT_SYMBOL_GPL(hid_destroy_device);
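/*
 * Illustrative sketch of the allocation lifecycle as used by transport
 * drivers (the ll_driver name is an assumption):
 *
 *	struct hid_device *hdev;
 *	int ret;
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *
 *	hdev->ll_driver = &my_ll_driver;
 *	hdev->bus = BUS_VIRTUAL;
 *
 *	ret = hid_add_device(hdev);
 *	if (ret) {
 *		hid_destroy_device(hdev);
 *		return ret;
 *	}
 */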
2829
2830
2831static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2832{
2833 struct hid_driver *hdrv = data;
2834 struct hid_device *hdev = to_hid_device(dev);
2835
2836 if (hdev->driver == hdrv &&
2837 !hdrv->match(hdev, hid_ignore_special_drivers) &&
2838 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2839 return device_reprobe(dev);
2840
2841 return 0;
2842}
2843
2844static int __hid_bus_driver_added(struct device_driver *drv, void *data)
2845{
2846 struct hid_driver *hdrv = to_hid_driver(drv);
2847
2848 if (hdrv->match) {
2849 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
2850 __hid_bus_reprobe_drivers);
2851 }
2852
2853 return 0;
2854}
2855
2856static int __bus_removed_driver(struct device_driver *drv, void *data)
2857{
2858 return bus_rescan_devices(&hid_bus_type);
2859}
2860
2861int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
2862 const char *mod_name)
2863{
2864 int ret;
2865
2866 hdrv->driver.name = hdrv->name;
2867 hdrv->driver.bus = &hid_bus_type;
2868 hdrv->driver.owner = owner;
2869 hdrv->driver.mod_name = mod_name;
2870
2871 INIT_LIST_HEAD(&hdrv->dyn_list);
2872 spin_lock_init(&hdrv->dyn_lock);
2873
2874 ret = driver_register(&hdrv->driver);
2875
2876 if (ret == 0)
2877 bus_for_each_drv(&hid_bus_type, NULL, NULL,
2878 __hid_bus_driver_added);
2879
2880 return ret;
2881}
2882EXPORT_SYMBOL_GPL(__hid_register_driver);
2883
2884void hid_unregister_driver(struct hid_driver *hdrv)
2885{
2886 driver_unregister(&hdrv->driver);
2887 hid_free_dynids(hdrv);
2888
2889 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
2890}
2891EXPORT_SYMBOL_GPL(hid_unregister_driver);
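/*
 * Illustrative sketch of a minimal HID driver registration (driver name and
 * IDs are made up); module_hid_driver() expands to module_init/module_exit
 * calling __hid_register_driver()/hid_unregister_driver():
 *
 *	static const struct hid_device_id my_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, my_devices);
 *
 *	static struct hid_driver my_driver = {
 *		.name = "my-hid",
 *		.id_table = my_devices,
 *	};
 *	module_hid_driver(my_driver);
 */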
2892
2893int hid_check_keys_pressed(struct hid_device *hid)
2894{
2895 struct hid_input *hidinput;
2896 int i;
2897
2898 if (!(hid->claimed & HID_CLAIMED_INPUT))
2899 return 0;
2900
2901 list_for_each_entry(hidinput, &hid->inputs, list) {
2902 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
2903 if (hidinput->input->key[i])
2904 return 1;
2905 }
2906
2907 return 0;
2908}
2909EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
2910
2911static int __init hid_init(void)
2912{
2913 int ret;
2914
2915 if (hid_debug)
2916 pr_warn("hid_debug is now used solely for parser and driver debugging.\n"
2917 "debugfs is now used for inspecting the device (report descriptor, reports)\n");
2918
2919 ret = bus_register(&hid_bus_type);
2920 if (ret) {
2921 pr_err("can't register hid bus\n");
2922 goto err;
2923 }
2924
2925 ret = hidraw_init();
2926 if (ret)
2927 goto err_bus;
2928
2929 hid_debug_init();
2930
2931 return 0;
2932err_bus:
2933 bus_unregister(&hid_bus_type);
2934err:
2935 return ret;
2936}
2937
2938static void __exit hid_exit(void)
2939{
2940 hid_debug_exit();
2941 hidraw_exit();
2942 bus_unregister(&hid_bus_type);
2943 hid_quirks_exit(HID_BUS_ANY);
2944}
2945
2946module_init(hid_init);
2947module_exit(hid_exit);
2948
2949MODULE_AUTHOR("Andreas Gal");
2950MODULE_AUTHOR("Vojtech Pavlik");
2951MODULE_AUTHOR("Jiri Kosina");
2952MODULE_LICENSE("GPL");
157 unsigned int new_size = parser->collection_stack_size +
158 HID_COLLECTION_STACK_SIZE;
159
160 collection_stack = krealloc(parser->collection_stack,
161 new_size * sizeof(unsigned int),
162 GFP_KERNEL);
163 if (!collection_stack)
164 return -ENOMEM;
165
166 parser->collection_stack = collection_stack;
167 parser->collection_stack_size = new_size;
168 }
169
170 if (parser->device->maxcollection == parser->device->collection_size) {
171 collection = kmalloc(
172 array3_size(sizeof(struct hid_collection),
173 parser->device->collection_size,
174 2),
175 GFP_KERNEL);
176 if (collection == NULL) {
177 hid_err(parser->device, "failed to reallocate collection array\n");
178 return -ENOMEM;
179 }
180 memcpy(collection, parser->device->collection,
181 sizeof(struct hid_collection) *
182 parser->device->collection_size);
183 memset(collection + parser->device->collection_size, 0,
184 sizeof(struct hid_collection) *
185 parser->device->collection_size);
186 kfree(parser->device->collection);
187 parser->device->collection = collection;
188 parser->device->collection_size *= 2;
189 }
190
191 parser->collection_stack[parser->collection_stack_ptr++] =
192 parser->device->maxcollection;
193
194 collection_index = parser->device->maxcollection++;
195 collection = parser->device->collection + collection_index;
196 collection->type = type;
197 collection->usage = usage;
198 collection->level = parser->collection_stack_ptr - 1;
199 collection->parent_idx = (collection->level == 0) ? -1 :
200 parser->collection_stack[collection->level - 1];
201
202 if (type == HID_COLLECTION_APPLICATION)
203 parser->device->maxapplication++;
204
205 return 0;
206}
207
208/*
209 * Close a collection.
210 */
211
212static int close_collection(struct hid_parser *parser)
213{
214 if (!parser->collection_stack_ptr) {
215 hid_err(parser->device, "collection stack underflow\n");
216 return -EINVAL;
217 }
218 parser->collection_stack_ptr--;
219 return 0;
220}
221
222/*
223 * Climb up the stack, search for the specified collection type
224 * and return the usage.
225 */
226
227static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
228{
229 struct hid_collection *collection = parser->device->collection;
230 int n;
231
232 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
233 unsigned index = parser->collection_stack[n];
234 if (collection[index].type == type)
235 return collection[index].usage;
236 }
237 return 0; /* we know nothing about this usage type */
238}
239
240/*
241 * Concatenate usage which defines 16 bits or less with the
242 * currently defined usage page to form a 32 bit usage
243 */
244
245static void complete_usage(struct hid_parser *parser, unsigned int index)
246{
247 parser->local.usage[index] &= 0xFFFF;
248 parser->local.usage[index] |=
249 (parser->global.usage_page & 0xFFFF) << 16;
250}
251
252/*
253 * Add a usage to the temporary parser table.
254 */
255
256static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
257{
258 if (parser->local.usage_index >= HID_MAX_USAGES) {
259 hid_err(parser->device, "usage index exceeded\n");
260 return -1;
261 }
262 parser->local.usage[parser->local.usage_index] = usage;
263
264 /*
265 * If Usage item only includes usage id, concatenate it with
266 * currently defined usage page
267 */
268 if (size <= 2)
269 complete_usage(parser, parser->local.usage_index);
270
271 parser->local.usage_size[parser->local.usage_index] = size;
272 parser->local.collection_index[parser->local.usage_index] =
273 parser->collection_stack_ptr ?
274 parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
275 parser->local.usage_index++;
276 return 0;
277}
278
279/*
280 * Register a new field for this report.
281 */
282
283static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
284{
285 struct hid_report *report;
286 struct hid_field *field;
287 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
288 unsigned int usages;
289 unsigned int offset;
290 unsigned int i;
291 unsigned int application;
292
293 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
294
295 report = hid_register_report(parser->device, report_type,
296 parser->global.report_id, application);
297 if (!report) {
298 hid_err(parser->device, "hid_register_report failed\n");
299 return -1;
300 }
301
302 /* Handle both signed and unsigned cases properly */
303 if ((parser->global.logical_minimum < 0 &&
304 parser->global.logical_maximum <
305 parser->global.logical_minimum) ||
306 (parser->global.logical_minimum >= 0 &&
307 (__u32)parser->global.logical_maximum <
308 (__u32)parser->global.logical_minimum)) {
309 dbg_hid("logical range invalid 0x%x 0x%x\n",
310 parser->global.logical_minimum,
311 parser->global.logical_maximum);
312 return -1;
313 }
314
315 offset = report->size;
316 report->size += parser->global.report_size * parser->global.report_count;
317
318 if (parser->device->ll_driver->max_buffer_size)
319 max_buffer_size = parser->device->ll_driver->max_buffer_size;
320
321 /* Total size check: Allow for possible report index byte */
322 if (report->size > (max_buffer_size - 1) << 3) {
323 hid_err(parser->device, "report is too long\n");
324 return -1;
325 }
326
327 if (!parser->local.usage_index) /* Ignore padding fields */
328 return 0;
329
330 usages = max_t(unsigned, parser->local.usage_index,
331 parser->global.report_count);
332
333 field = hid_register_field(report, usages);
334 if (!field)
335 return 0;
336
337 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
338 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
339 field->application = application;
340
341 for (i = 0; i < usages; i++) {
342 unsigned j = i;
343 /* Duplicate the last usage we parsed if we have excess values */
344 if (i >= parser->local.usage_index)
345 j = parser->local.usage_index - 1;
346 field->usage[i].hid = parser->local.usage[j];
347 field->usage[i].collection_index =
348 parser->local.collection_index[j];
349 field->usage[i].usage_index = i;
350 field->usage[i].resolution_multiplier = 1;
351 }
352
353 field->maxusage = usages;
354 field->flags = flags;
355 field->report_offset = offset;
356 field->report_type = report_type;
357 field->report_size = parser->global.report_size;
358 field->report_count = parser->global.report_count;
359 field->logical_minimum = parser->global.logical_minimum;
360 field->logical_maximum = parser->global.logical_maximum;
361 field->physical_minimum = parser->global.physical_minimum;
362 field->physical_maximum = parser->global.physical_maximum;
363 field->unit_exponent = parser->global.unit_exponent;
364 field->unit = parser->global.unit;
365
366 return 0;
367}
368
369/*
370 * Read data value from item.
371 */
372
373static u32 item_udata(struct hid_item *item)
374{
375 switch (item->size) {
376 case 1: return item->data.u8;
377 case 2: return item->data.u16;
378 case 4: return item->data.u32;
379 }
380 return 0;
381}
382
383static s32 item_sdata(struct hid_item *item)
384{
385 switch (item->size) {
386 case 1: return item->data.s8;
387 case 2: return item->data.s16;
388 case 4: return item->data.s32;
389 }
390 return 0;
391}
392
393/*
394 * Process a global item.
395 */
396
397static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
398{
399 __s32 raw_value;
400 switch (item->tag) {
401 case HID_GLOBAL_ITEM_TAG_PUSH:
402
403 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
404 hid_err(parser->device, "global environment stack overflow\n");
405 return -1;
406 }
407
408 memcpy(parser->global_stack + parser->global_stack_ptr++,
409 &parser->global, sizeof(struct hid_global));
410 return 0;
411
412 case HID_GLOBAL_ITEM_TAG_POP:
413
414 if (!parser->global_stack_ptr) {
415 hid_err(parser->device, "global environment stack underflow\n");
416 return -1;
417 }
418
419 memcpy(&parser->global, parser->global_stack +
420 --parser->global_stack_ptr, sizeof(struct hid_global));
421 return 0;
422
423 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
424 parser->global.usage_page = item_udata(item);
425 return 0;
426
427 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
428 parser->global.logical_minimum = item_sdata(item);
429 return 0;
430
431 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
432 if (parser->global.logical_minimum < 0)
433 parser->global.logical_maximum = item_sdata(item);
434 else
435 parser->global.logical_maximum = item_udata(item);
436 return 0;
437
438 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
439 parser->global.physical_minimum = item_sdata(item);
440 return 0;
441
442 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
443 if (parser->global.physical_minimum < 0)
444 parser->global.physical_maximum = item_sdata(item);
445 else
446 parser->global.physical_maximum = item_udata(item);
447 return 0;
448
449 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
450 /* Many devices provide unit exponent as a two's complement
451 * nibble due to the common misunderstanding of HID
452 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
453 * both this and the standard encoding. */
454 raw_value = item_sdata(item);
455 if (!(raw_value & 0xfffffff0))
456 parser->global.unit_exponent = snto32(raw_value, 4);
457 else
458 parser->global.unit_exponent = raw_value;
459 return 0;
460
461 case HID_GLOBAL_ITEM_TAG_UNIT:
462 parser->global.unit = item_udata(item);
463 return 0;
464
465 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
466 parser->global.report_size = item_udata(item);
467 if (parser->global.report_size > 256) {
468 hid_err(parser->device, "invalid report_size %d\n",
469 parser->global.report_size);
470 return -1;
471 }
472 return 0;
473
474 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
475 parser->global.report_count = item_udata(item);
476 if (parser->global.report_count > HID_MAX_USAGES) {
477 hid_err(parser->device, "invalid report_count %d\n",
478 parser->global.report_count);
479 return -1;
480 }
481 return 0;
482
483 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
484 parser->global.report_id = item_udata(item);
485 if (parser->global.report_id == 0 ||
486 parser->global.report_id >= HID_MAX_IDS) {
487 hid_err(parser->device, "report_id %u is invalid\n",
488 parser->global.report_id);
489 return -1;
490 }
491 return 0;
492
493 default:
494 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
495 return -1;
496 }
497}
498
499/*
500 * Process a local item.
501 */
502
503static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
504{
505 __u32 data;
506 unsigned n;
507 __u32 count;
508
509 data = item_udata(item);
510
511 switch (item->tag) {
512 case HID_LOCAL_ITEM_TAG_DELIMITER:
513
514 if (data) {
515 /*
516 * We treat items before the first delimiter
517 * as global to all usage sets (branch 0).
518 * In the moment we process only these global
519 * items and the first delimiter set.
520 */
521 if (parser->local.delimiter_depth != 0) {
522 hid_err(parser->device, "nested delimiters\n");
523 return -1;
524 }
525 parser->local.delimiter_depth++;
526 parser->local.delimiter_branch++;
527 } else {
528 if (parser->local.delimiter_depth < 1) {
529 hid_err(parser->device, "bogus close delimiter\n");
530 return -1;
531 }
532 parser->local.delimiter_depth--;
533 }
534 return 0;
535
536 case HID_LOCAL_ITEM_TAG_USAGE:
537
538 if (parser->local.delimiter_branch > 1) {
539 dbg_hid("alternative usage ignored\n");
540 return 0;
541 }
542
543 return hid_add_usage(parser, data, item->size);
544
545 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
546
547 if (parser->local.delimiter_branch > 1) {
548 dbg_hid("alternative usage ignored\n");
549 return 0;
550 }
551
552 parser->local.usage_minimum = data;
553 return 0;
554
555 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
556
557 if (parser->local.delimiter_branch > 1) {
558 dbg_hid("alternative usage ignored\n");
559 return 0;
560 }
561
562 count = data - parser->local.usage_minimum;
563 if (count + parser->local.usage_index >= HID_MAX_USAGES) {
564 /*
565 * We do not warn if the name is not set, we are
566 * actually pre-scanning the device.
567 */
568 if (dev_name(&parser->device->dev))
569 hid_warn(parser->device,
570 "ignoring exceeding usage max\n");
571 data = HID_MAX_USAGES - parser->local.usage_index +
572 parser->local.usage_minimum - 1;
573 if (data <= 0) {
574 hid_err(parser->device,
575 "no more usage index available\n");
576 return -1;
577 }
578 }
579
580 for (n = parser->local.usage_minimum; n <= data; n++)
581 if (hid_add_usage(parser, n, item->size)) {
582 dbg_hid("hid_add_usage failed\n");
583 return -1;
584 }
585 return 0;
586
587 default:
588
589 dbg_hid("unknown local item tag 0x%x\n", item->tag);
590 return 0;
591 }
592 return 0;
593}
594
595/*
596 * Concatenate Usage Pages into Usages where relevant:
597 * As per specification, 6.2.2.8: "When the parser encounters a main item it
598 * concatenates the last declared Usage Page with a Usage to form a complete
599 * usage value."
600 */
601
602static void hid_concatenate_last_usage_page(struct hid_parser *parser)
603{
604 int i;
605 unsigned int usage_page;
606 unsigned int current_page;
607
608 if (!parser->local.usage_index)
609 return;
610
611 usage_page = parser->global.usage_page;
612
613 /*
614 * Concatenate usage page again only if last declared Usage Page
615 * has not been already used in previous usages concatenation
616 */
617 for (i = parser->local.usage_index - 1; i >= 0; i--) {
618 if (parser->local.usage_size[i] > 2)
619 /* Ignore extended usages */
620 continue;
621
622 current_page = parser->local.usage[i] >> 16;
623 if (current_page == usage_page)
624 break;
625
626 complete_usage(parser, i);
627 }
628}
629
630/*
631 * Process a main item.
632 */
633
634static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
635{
636 __u32 data;
637 int ret;
638
639 hid_concatenate_last_usage_page(parser);
640
641 data = item_udata(item);
642
643 switch (item->tag) {
644 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
645 ret = open_collection(parser, data & 0xff);
646 break;
647 case HID_MAIN_ITEM_TAG_END_COLLECTION:
648 ret = close_collection(parser);
649 break;
650 case HID_MAIN_ITEM_TAG_INPUT:
651 ret = hid_add_field(parser, HID_INPUT_REPORT, data);
652 break;
653 case HID_MAIN_ITEM_TAG_OUTPUT:
654 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
655 break;
656 case HID_MAIN_ITEM_TAG_FEATURE:
657 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
658 break;
659 default:
660 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
661 ret = 0;
662 }
663
664 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */
665
666 return ret;
667}
668
669/*
670 * Process a reserved item.
671 */
672
673static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
674{
675 dbg_hid("reserved item type, tag 0x%x\n", item->tag);
676 return 0;
677}
678
679/*
680 * Free a report and all registered fields. The field->usage and
681 * field->value table's are allocated behind the field, so we need
682 * only to free(field) itself.
683 */
684
685static void hid_free_report(struct hid_report *report)
686{
687 unsigned n;
688
689 kfree(report->field_entries);
690
691 for (n = 0; n < report->maxfield; n++)
692 kvfree(report->field[n]);
693 kfree(report);
694}
695
696/*
697 * Close report. This function returns the device
698 * state to the point prior to hid_open_report().
699 */
700static void hid_close_report(struct hid_device *device)
701{
702 unsigned i, j;
703
704 for (i = 0; i < HID_REPORT_TYPES; i++) {
705 struct hid_report_enum *report_enum = device->report_enum + i;
706
707 for (j = 0; j < HID_MAX_IDS; j++) {
708 struct hid_report *report = report_enum->report_id_hash[j];
709 if (report)
710 hid_free_report(report);
711 }
712 memset(report_enum, 0, sizeof(*report_enum));
713 INIT_LIST_HEAD(&report_enum->report_list);
714 }
715
716 /*
717 * If the HID driver had a rdesc_fixup() callback, dev->rdesc
718 * will be allocated by hid-core and needs to be freed.
719 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
720 * which cases it'll be freed later on device removal or destroy.
721 */
722 if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
723 kfree(device->rdesc);
724 device->rdesc = NULL;
725 device->rsize = 0;
726
727 kfree(device->collection);
728 device->collection = NULL;
729 device->collection_size = 0;
730 device->maxcollection = 0;
731 device->maxapplication = 0;
732
733 device->status &= ~HID_STAT_PARSED;
734}
735
736static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
737{
738 /* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
739 if (hdev->bpf_rdesc != hdev->dev_rdesc)
740 kfree(hdev->bpf_rdesc);
741 hdev->bpf_rdesc = NULL;
742}
743
744/*
745 * Free a device structure, all reports, and all fields.
746 */
747
748void hiddev_free(struct kref *ref)
749{
750 struct hid_device *hid = container_of(ref, struct hid_device, ref);
751
752 hid_close_report(hid);
753 hid_free_bpf_rdesc(hid);
754 kfree(hid->dev_rdesc);
755 kfree(hid);
756}
757
758static void hid_device_release(struct device *dev)
759{
760 struct hid_device *hid = to_hid_device(dev);
761
762 kref_put(&hid->ref, hiddev_free);
763}
764
765/*
766 * Fetch a report description item from the data stream. We support long
767 * items, though they are not used yet.
768 */
769
770static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
771{
772 u8 b;
773
774 if ((end - start) <= 0)
775 return NULL;
776
777 b = *start++;
778
779 item->type = (b >> 2) & 3;
780 item->tag = (b >> 4) & 15;
781
782 if (item->tag == HID_ITEM_TAG_LONG) {
783
784 item->format = HID_ITEM_FORMAT_LONG;
785
786 if ((end - start) < 2)
787 return NULL;
788
789 item->size = *start++;
790 item->tag = *start++;
791
792 if ((end - start) < item->size)
793 return NULL;
794
795 item->data.longdata = start;
796 start += item->size;
797 return start;
798 }
799
800 item->format = HID_ITEM_FORMAT_SHORT;
801 item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */
802
803 if (end - start < item->size)
804 return NULL;
805
806 switch (item->size) {
807 case 0:
808 break;
809
810 case 1:
811 item->data.u8 = *start;
812 break;
813
814 case 2:
815 item->data.u16 = get_unaligned_le16(start);
816 break;
817
818 case 4:
819 item->data.u32 = get_unaligned_le32(start);
820 break;
821 }
822
823 return start + item->size;
824}
825
826static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
827{
828 struct hid_device *hid = parser->device;
829
830 if (usage == HID_DG_CONTACTID)
831 hid->group = HID_GROUP_MULTITOUCH;
832}
833
834static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
835{
836 if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
837 parser->global.report_size == 8)
838 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
839
840 if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
841 parser->global.report_size == 8)
842 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
843}
844
845static void hid_scan_collection(struct hid_parser *parser, unsigned type)
846{
847 struct hid_device *hid = parser->device;
848 int i;
849
850 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
851 (type == HID_COLLECTION_PHYSICAL ||
852 type == HID_COLLECTION_APPLICATION))
853 hid->group = HID_GROUP_SENSOR_HUB;
854
855 if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
856 hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
857 hid->group == HID_GROUP_MULTITOUCH)
858 hid->group = HID_GROUP_GENERIC;
859
860 if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
861 for (i = 0; i < parser->local.usage_index; i++)
862 if (parser->local.usage[i] == HID_GD_POINTER)
863 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
864
865 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
866 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
867
868 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
869 for (i = 0; i < parser->local.usage_index; i++)
870 if (parser->local.usage[i] ==
871 (HID_UP_GOOGLEVENDOR | 0x0001))
872 parser->device->group =
873 HID_GROUP_VIVALDI;
874}
875
876static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
877{
878 __u32 data;
879 int i;
880
881 hid_concatenate_last_usage_page(parser);
882
883 data = item_udata(item);
884
885 switch (item->tag) {
886 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
887 hid_scan_collection(parser, data & 0xff);
888 break;
889 case HID_MAIN_ITEM_TAG_END_COLLECTION:
890 break;
891 case HID_MAIN_ITEM_TAG_INPUT:
892 /* ignore constant inputs, they will be ignored by hid-input */
893 if (data & HID_MAIN_ITEM_CONSTANT)
894 break;
895 for (i = 0; i < parser->local.usage_index; i++)
896 hid_scan_input_usage(parser, parser->local.usage[i]);
897 break;
898 case HID_MAIN_ITEM_TAG_OUTPUT:
899 break;
900 case HID_MAIN_ITEM_TAG_FEATURE:
901 for (i = 0; i < parser->local.usage_index; i++)
902 hid_scan_feature_usage(parser, parser->local.usage[i]);
903 break;
904 }
905
906 /* Reset the local parser environment */
907 memset(&parser->local, 0, sizeof(parser->local));
908
909 return 0;
910}
911
912/*
913 * Scan a report descriptor before the device is added to the bus.
914 * Sets device groups and other properties that determine what driver
915 * to load.
916 */
917static int hid_scan_report(struct hid_device *hid)
918{
919 struct hid_parser *parser;
920 struct hid_item item;
921 const __u8 *start = hid->dev_rdesc;
922 const __u8 *end = start + hid->dev_rsize;
923 static int (*dispatch_type[])(struct hid_parser *parser,
924 struct hid_item *item) = {
925 hid_scan_main,
926 hid_parser_global,
927 hid_parser_local,
928 hid_parser_reserved
929 };
930
931 parser = vzalloc(sizeof(struct hid_parser));
932 if (!parser)
933 return -ENOMEM;
934
935 parser->device = hid;
936 hid->group = HID_GROUP_GENERIC;
937
938 /*
939 * The parsing is simpler than the one in hid_open_report() as we should
940 * be robust against hid errors. Those errors will be raised by
941 * hid_open_report() anyway.
942 */
943 while ((start = fetch_item(start, end, &item)) != NULL)
944 dispatch_type[item.type](parser, &item);
945
946 /*
947 * Handle special flags set during scanning.
948 */
949 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
950 (hid->group == HID_GROUP_MULTITOUCH))
951 hid->group = HID_GROUP_MULTITOUCH_WIN_8;
952
953 /*
954 * Vendor specific handlings
955 */
956 switch (hid->vendor) {
957 case USB_VENDOR_ID_WACOM:
958 hid->group = HID_GROUP_WACOM;
959 break;
960 case USB_VENDOR_ID_SYNAPTICS:
961 if (hid->group == HID_GROUP_GENERIC)
962 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
963 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
964 /*
965 * hid-rmi should take care of them,
966 * not hid-generic
967 */
968 hid->group = HID_GROUP_RMI;
969 break;
970 }
971
972 kfree(parser->collection_stack);
973 vfree(parser);
974 return 0;
975}
976
977/**
978 * hid_parse_report - parse device report
979 *
980 * @hid: hid device
981 * @start: report start
982 * @size: report size
983 *
984 * Allocate the device report as read by the bus driver. This function should
985 * only be called from parse() in ll drivers.
986 */
987int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
988{
989 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
990 if (!hid->dev_rdesc)
991 return -ENOMEM;
992 hid->dev_rsize = size;
993 return 0;
994}
995EXPORT_SYMBOL_GPL(hid_parse_report);
996
997static const char * const hid_report_names[] = {
998 "HID_INPUT_REPORT",
999 "HID_OUTPUT_REPORT",
1000 "HID_FEATURE_REPORT",
1001};
1002/**
1003 * hid_validate_values - validate existing device report's value indexes
1004 *
1005 * @hid: hid device
1006 * @type: which report type to examine
1007 * @id: which report ID to examine (0 for first)
1008 * @field_index: which report field to examine
1009 * @report_counts: expected number of values
1010 *
1011 * Validate the number of values in a given field of a given report, after
1012 * parsing.
1013 */
1014struct hid_report *hid_validate_values(struct hid_device *hid,
1015 enum hid_report_type type, unsigned int id,
1016 unsigned int field_index,
1017 unsigned int report_counts)
1018{
1019 struct hid_report *report;
1020
1021 if (type > HID_FEATURE_REPORT) {
1022 hid_err(hid, "invalid HID report type %u\n", type);
1023 return NULL;
1024 }
1025
1026 if (id >= HID_MAX_IDS) {
1027 hid_err(hid, "invalid HID report id %u\n", id);
1028 return NULL;
1029 }
1030
1031 /*
1032 * Explicitly not using hid_get_report() here since it depends on
1033 * ->numbered being checked, which may not always be the case when
1034 * drivers go to access report values.
1035 */
1036 if (id == 0) {
1037 /*
1038 * Validating on id 0 means we should examine the first
1039 * report in the list.
1040 */
1041 report = list_first_entry_or_null(
1042 &hid->report_enum[type].report_list,
1043 struct hid_report, list);
1044 } else {
1045 report = hid->report_enum[type].report_id_hash[id];
1046 }
1047 if (!report) {
1048 hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1049 return NULL;
1050 }
1051 if (report->maxfield <= field_index) {
1052 hid_err(hid, "not enough fields in %s %u\n",
1053 hid_report_names[type], id);
1054 return NULL;
1055 }
1056 if (report->field[field_index]->report_count < report_counts) {
1057 hid_err(hid, "not enough values in %s %u field %u\n",
1058 hid_report_names[type], id, field_index);
1059 return NULL;
1060 }
1061 return report;
1062}
1063EXPORT_SYMBOL_GPL(hid_validate_values);
1064
1065static int hid_calculate_multiplier(struct hid_device *hid,
1066 struct hid_field *multiplier)
1067{
1068 int m;
1069 __s32 v = *multiplier->value;
1070 __s32 lmin = multiplier->logical_minimum;
1071 __s32 lmax = multiplier->logical_maximum;
1072 __s32 pmin = multiplier->physical_minimum;
1073 __s32 pmax = multiplier->physical_maximum;
1074
1075 /*
1076 * "Because OS implementations will generally divide the control's
1077 * reported count by the Effective Resolution Multiplier, designers
1078 * should take care not to establish a potential Effective
1079 * Resolution Multiplier of zero."
1080 * HID Usage Table, v1.12, Section 4.3.1, p31
1081 */
1082 if (lmax - lmin == 0)
1083 return 1;
1084 /*
1085 * Handling the unit exponent is left as an exercise to whoever
1086 * finds a device where that exponent is not 0.
1087 */
1088 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1089 if (unlikely(multiplier->unit_exponent != 0)) {
1090 hid_warn(hid,
1091 "unsupported Resolution Multiplier unit exponent %d\n",
1092 multiplier->unit_exponent);
1093 }
1094
1095 /* There are no devices with an effective multiplier > 255 */
1096 if (unlikely(m == 0 || m > 255 || m < -255)) {
1097 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1098 m = 1;
1099 }
1100
1101 return m;
1102}
1103
1104static void hid_apply_multiplier_to_field(struct hid_device *hid,
1105 struct hid_field *field,
1106 struct hid_collection *multiplier_collection,
1107 int effective_multiplier)
1108{
1109 struct hid_collection *collection;
1110 struct hid_usage *usage;
1111 int i;
1112
1113 /*
1114 * If multiplier_collection is NULL, the multiplier applies
1115 * to all fields in the report.
1116 * Otherwise, it is the Logical Collection the multiplier applies to
1117 * but our field may be in a subcollection of that collection.
1118 */
1119 for (i = 0; i < field->maxusage; i++) {
1120 usage = &field->usage[i];
1121
1122 collection = &hid->collection[usage->collection_index];
1123 while (collection->parent_idx != -1 &&
1124 collection != multiplier_collection)
1125 collection = &hid->collection[collection->parent_idx];
1126
1127 if (collection->parent_idx != -1 ||
1128 multiplier_collection == NULL)
1129 usage->resolution_multiplier = effective_multiplier;
1130
1131 }
1132}
1133
1134static void hid_apply_multiplier(struct hid_device *hid,
1135 struct hid_field *multiplier)
1136{
1137 struct hid_report_enum *rep_enum;
1138 struct hid_report *rep;
1139 struct hid_field *field;
1140 struct hid_collection *multiplier_collection;
1141 int effective_multiplier;
1142 int i;
1143
1144 /*
1145 * "The Resolution Multiplier control must be contained in the same
1146 * Logical Collection as the control(s) to which it is to be applied.
1147 * If no Resolution Multiplier is defined, then the Resolution
1148 * Multiplier defaults to 1. If more than one control exists in a
1149 * Logical Collection, the Resolution Multiplier is associated with
1150 * all controls in the collection. If no Logical Collection is
1151 * defined, the Resolution Multiplier is associated with all
1152 * controls in the report."
1153 * HID Usage Table, v1.12, Section 4.3.1, p30
1154 *
1155 * Thus, search from the current collection upwards until we find a
1156 * logical collection. Then search all fields for that same parent
1157 * collection. Those are the fields the multiplier applies to.
1158 *
1159 * If there is more than one multiplier, a later multiplier simply
1160 * overwrites the value set on the applicable fields by an earlier one.
1161 */
1162 multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1163 while (multiplier_collection->parent_idx != -1 &&
1164 multiplier_collection->type != HID_COLLECTION_LOGICAL)
1165 multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1166 if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
1167 multiplier_collection = NULL;
1168
1169 effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1170
1171 rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1172 list_for_each_entry(rep, &rep_enum->report_list, list) {
1173 for (i = 0; i < rep->maxfield; i++) {
1174 field = rep->field[i];
1175 hid_apply_multiplier_to_field(hid, field,
1176 multiplier_collection,
1177 effective_multiplier);
1178 }
1179 }
1180}
1181
1182/*
1183 * hid_setup_resolution_multiplier - set up all resolution multipliers
1184 *
1185 * @hid: hid device
1186 *
1187 * Search for all Resolution Multiplier Feature Reports and apply their
1188 * value to all matching Input items. This only updates the internal struct
1189 * fields.
1190 *
1191 * The Resolution Multiplier is applied by the hardware. If the multiplier
1192 * is anything other than 1, the hardware will send pre-multiplied events
1193 * so that the same physical interaction generates an accumulated
1194 * accumulated_value = value * multiplier
1195 * This may be achieved by sending
1196 * - "value * multiplier" for each event, or
1197 * - "value" but "multiplier" times as frequently, or
1198 * - a combination of the above
1199 * The only guarantee is that the same physical interaction always generates
1200 * an accumulated 'value * multiplier'.
1201 *
1202 * This function must be called before any event processing and after
1203 * any SetRequest to the Resolution Multiplier.
1204 */
1205void hid_setup_resolution_multiplier(struct hid_device *hid)
1206{
1207 struct hid_report_enum *rep_enum;
1208 struct hid_report *rep;
1209 struct hid_usage *usage;
1210 int i, j;
1211
1212 rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1213 list_for_each_entry(rep, &rep_enum->report_list, list) {
1214 for (i = 0; i < rep->maxfield; i++) {
1215 /* Ignore if report count is out of bounds. */
1216 if (rep->field[i]->report_count < 1)
1217 continue;
1218
1219 for (j = 0; j < rep->field[i]->maxusage; j++) {
1220 usage = &rep->field[i]->usage[j];
1221 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1222 hid_apply_multiplier(hid,
1223 rep->field[i]);
1224 }
1225 }
1226 }
1227}
1228EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1229
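/*
 * Example (hypothetical, not part of this file): a driver enabling a
 * device's high-resolution mode would typically update the cached
 * Resolution Multiplier feature value, push it to the device and then
 * refresh the per-usage multipliers ("mult_field" is an assumed pointer
 * to the multiplier's hid_field):
 *
 *	mult_field->value[0] = mult_field->logical_maximum;
 *	hid_hw_request(hdev, mult_field->report, HID_REQ_SET_REPORT);
 *	hid_setup_resolution_multiplier(hdev);
 */
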
1230/**
1231 * hid_open_report - open a driver-specific device report
1232 *
1233 * @device: hid device
1234 *
1235 * Parse a report description into a hid_device structure. Reports are
1236 * enumerated, fields are attached to these reports.
1237 * Returns 0 on success, otherwise a nonzero error value.
1238 *
1239 * This function (or the equivalent hid_parse() macro) should only be
1240 * called from probe() in drivers, before starting the device.
1241 */
1242int hid_open_report(struct hid_device *device)
1243{
1244 struct hid_parser *parser;
1245 struct hid_item item;
1246 unsigned int size;
1247 const __u8 *start;
1248 const __u8 *end;
1249 const __u8 *next;
1250 int ret;
1251 int i;
1252 static int (*dispatch_type[])(struct hid_parser *parser,
1253 struct hid_item *item) = {
1254 hid_parser_main,
1255 hid_parser_global,
1256 hid_parser_local,
1257 hid_parser_reserved
1258 };
1259
1260 if (WARN_ON(device->status & HID_STAT_PARSED))
1261 return -EBUSY;
1262
1263 start = device->bpf_rdesc;
1264 if (WARN_ON(!start))
1265 return -ENODEV;
1266 size = device->bpf_rsize;
1267
1268 if (device->driver->report_fixup) {
1269 /*
1270 * device->driver->report_fixup() needs to work
1271 * on a copy of our report descriptor so it can
1272 * change it.
1273 */
1274 __u8 *buf = kmemdup(start, size, GFP_KERNEL);
1275
1276 if (buf == NULL)
1277 return -ENOMEM;
1278
1279 start = device->driver->report_fixup(device, buf, &size);
1280
1281 /*
1282 * The second kmemdup is required in case report_fixup() returns
1283 * a static read-only memory, but we have no idea if that memory
1284 * needs to be cleaned up or not at the end.
1285 */
1286 start = kmemdup(start, size, GFP_KERNEL);
1287 kfree(buf);
1288 if (start == NULL)
1289 return -ENOMEM;
1290 }
1291
1292 device->rdesc = start;
1293 device->rsize = size;
1294
1295 parser = vzalloc(sizeof(struct hid_parser));
1296 if (!parser) {
1297 ret = -ENOMEM;
1298 goto alloc_err;
1299 }
1300
1301 parser->device = device;
1302
1303 end = start + size;
1304
1305 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1306 sizeof(struct hid_collection), GFP_KERNEL);
1307 if (!device->collection) {
1308 ret = -ENOMEM;
1309 goto err;
1310 }
1311 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1312 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1313 device->collection[i].parent_idx = -1;
1314
1315 ret = -EINVAL;
1316 while ((next = fetch_item(start, end, &item)) != NULL) {
1317 start = next;
1318
1319 if (item.format != HID_ITEM_FORMAT_SHORT) {
1320 hid_err(device, "unexpected long global item\n");
1321 goto err;
1322 }
1323
1324 if (dispatch_type[item.type](parser, &item)) {
1325 hid_err(device, "item %u %u %u %u parsing failed\n",
1326 item.format, (unsigned)item.size,
1327 (unsigned)item.type, (unsigned)item.tag);
1328 goto err;
1329 }
1330
1331 if (start == end) {
1332 if (parser->collection_stack_ptr) {
1333 hid_err(device, "unbalanced collection at end of report description\n");
1334 goto err;
1335 }
1336 if (parser->local.delimiter_depth) {
1337 hid_err(device, "unbalanced delimiter at end of report description\n");
1338 goto err;
1339 }
1340
1341 /*
1342 * fetch initial values in case the device's
1343 * default multiplier isn't the recommended 1
1344 */
1345 hid_setup_resolution_multiplier(device);
1346
1347 kfree(parser->collection_stack);
1348 vfree(parser);
1349 device->status |= HID_STAT_PARSED;
1350
1351 return 0;
1352 }
1353 }
1354
1355 hid_err(device, "item fetching failed at offset %u/%u\n",
1356 size - (unsigned int)(end - start), size);
1357err:
1358 kfree(parser->collection_stack);
1359alloc_err:
1360 vfree(parser);
1361 hid_close_report(device);
1362 return ret;
1363}
1364EXPORT_SYMBOL_GPL(hid_open_report);
1365
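/*
 * Example (hypothetical driver code, not part of this file): the usual
 * probe() built on top of this function, via the hid_parse() wrapper:
 *
 *	static int xyz_probe(struct hid_device *hdev,
 *			     const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 */
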
1366/*
1367 * Extract/implement a data field from/to a little endian report (bit array).
1368 *
1369 * Code sort-of follows HID spec:
1370 * http://www.usb.org/developers/hidpage/HID1_11.pdf
1371 *
1372 * While the USB HID spec allows unlimited length bit fields in "report
1373 * descriptors", most devices never use more than 16 bits.
1374 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1375 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1376 */
1377
1378static u32 __extract(u8 *report, unsigned offset, int n)
1379{
1380 unsigned int idx = offset / 8;
1381 unsigned int bit_nr = 0;
1382 unsigned int bit_shift = offset % 8;
1383 int bits_to_copy = 8 - bit_shift;
1384 u32 value = 0;
1385 u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1386
1387 while (n > 0) {
1388 value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1389 n -= bits_to_copy;
1390 bit_nr += bits_to_copy;
1391 bits_to_copy = 8;
1392 bit_shift = 0;
1393 idx++;
1394 }
1395
1396 return value & mask;
1397}
1398
1399u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1400 unsigned offset, unsigned n)
1401{
1402 if (n > 32) {
1403 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1404 __func__, n, current->comm);
1405 n = 32;
1406 }
1407
1408 return __extract(report, offset, n);
1409}
1410EXPORT_SYMBOL_GPL(hid_field_extract);
1411
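/*
 * Worked example (illustrative only): with report[] = { 0x34, 0x12 },
 * hid_field_extract(hid, report, 4, 8) reads the 8 bits starting at bit
 * offset 4 of the little endian bit stream and returns 0x23.
 */
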
1412/*
1413 * "implement" : set bits in a little endian bit stream.
1414 * Same concepts as "extract" (see comments above).
1415 * The data mangled in the bit stream remains in little endian
1416 * order the whole time. It makes more sense to talk about
1417 * endianness of register values by considering a register
1418 * a "cached" copy of the little endian bit stream.
1419 */
1420
1421static void __implement(u8 *report, unsigned offset, int n, u32 value)
1422{
1423 unsigned int idx = offset / 8;
1424 unsigned int bit_shift = offset % 8;
1425 int bits_to_set = 8 - bit_shift;
1426
1427 while (n - bits_to_set >= 0) {
1428 report[idx] &= ~(0xff << bit_shift);
1429 report[idx] |= value << bit_shift;
1430 value >>= bits_to_set;
1431 n -= bits_to_set;
1432 bits_to_set = 8;
1433 bit_shift = 0;
1434 idx++;
1435 }
1436
1437	/* write any remaining bits (less than a full byte) */
1438 if (n) {
1439 u8 bit_mask = ((1U << n) - 1);
1440 report[idx] &= ~(bit_mask << bit_shift);
1441 report[idx] |= value << bit_shift;
1442 }
1443}
1444
1445static void implement(const struct hid_device *hid, u8 *report,
1446 unsigned offset, unsigned n, u32 value)
1447{
1448 if (unlikely(n > 32)) {
1449 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1450 __func__, n, current->comm);
1451 n = 32;
1452 } else if (n < 32) {
1453 u32 m = (1U << n) - 1;
1454
1455 if (unlikely(value > m)) {
1456 hid_warn(hid,
1457 "%s() called with too large value %d (n: %d)! (%s)\n",
1458 __func__, value, n, current->comm);
1459 value &= m;
1460 }
1461 }
1462
1463 __implement(report, offset, n, value);
1464}
1465
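/*
 * Worked example (illustrative only): starting from report[] = { 0x04, 0x10 },
 * implement(hid, report, 4, 8, 0x23) rewrites the 8 bits at bit offset 4,
 * leaving report[] = { 0x34, 0x12 } -- the exact inverse of the extract
 * example above.
 */
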
1466/*
1467 * Search an array for a value. Returns 0 if found, -1 otherwise.
1468 */
1469
1470static int search(__s32 *array, __s32 value, unsigned n)
1471{
1472 while (n--) {
1473 if (*array++ == value)
1474 return 0;
1475 }
1476 return -1;
1477}
1478
1479/**
1480 * hid_match_report - check if driver's raw_event should be called
1481 *
1482 * @hid: hid device
1483 * @report: hid report to match against
1484 *
1485 * compare hid->driver->report_table->report_type to report->type
1486 */
1487static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1488{
1489 const struct hid_report_id *id = hid->driver->report_table;
1490
1491 if (!id) /* NULL means all */
1492 return 1;
1493
1494 for (; id->report_type != HID_TERMINATOR; id++)
1495 if (id->report_type == HID_ANY_ID ||
1496 id->report_type == report->type)
1497 return 1;
1498 return 0;
1499}
1500
1501/**
1502 * hid_match_usage - check if driver's event should be called
1503 *
1504 * @hid: hid device
1505 * @usage: usage to match against
1506 *
1507 * compare hid->driver->usage_table->usage_{type,code} to
1508 * usage->usage_{type,code}
1509 */
1510static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1511{
1512 const struct hid_usage_id *id = hid->driver->usage_table;
1513
1514 if (!id) /* NULL means all */
1515 return 1;
1516
1517 for (; id->usage_type != HID_ANY_ID - 1; id++)
1518 if ((id->usage_hid == HID_ANY_ID ||
1519 id->usage_hid == usage->hid) &&
1520 (id->usage_type == HID_ANY_ID ||
1521 id->usage_type == usage->type) &&
1522 (id->usage_code == HID_ANY_ID ||
1523 id->usage_code == usage->code))
1524 return 1;
1525 return 0;
1526}
1527
1528static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1529 struct hid_usage *usage, __s32 value, int interrupt)
1530{
1531 struct hid_driver *hdrv = hid->driver;
1532 int ret;
1533
1534 if (!list_empty(&hid->debug_list))
1535 hid_dump_input(hid, usage, value);
1536
1537 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1538 ret = hdrv->event(hid, field, usage, value);
1539 if (ret != 0) {
1540 if (ret < 0)
1541 hid_err(hid, "%s's event failed with %d\n",
1542 hdrv->name, ret);
1543 return;
1544 }
1545 }
1546
1547 if (hid->claimed & HID_CLAIMED_INPUT)
1548 hidinput_hid_event(hid, field, usage, value);
1549 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1550 hid->hiddev_hid_event(hid, field, usage, value);
1551}
1552
1553/*
1554 * Checks if the given value is valid within this field
1555 */
1556static inline int hid_array_value_is_valid(struct hid_field *field,
1557 __s32 value)
1558{
1559 __s32 min = field->logical_minimum;
1560
1561 /*
1562 * Value needs to be between logical min and max, and
1563 * (value - min) is used as an index in the usage array.
1564 * This array is of size field->maxusage
1565 */
1566 return value >= min &&
1567 value <= field->logical_maximum &&
1568 value - min < field->maxusage;
1569}
1570
1571/*
1572 * Fetch the field from the data. The field content is stored for next
1573 * report processing (we do differential reporting to the layer).
1574 */
1575static void hid_input_fetch_field(struct hid_device *hid,
1576 struct hid_field *field,
1577 __u8 *data)
1578{
1579 unsigned n;
1580 unsigned count = field->report_count;
1581 unsigned offset = field->report_offset;
1582 unsigned size = field->report_size;
1583 __s32 min = field->logical_minimum;
1584 __s32 *value;
1585
1586 value = field->new_value;
1587 memset(value, 0, count * sizeof(__s32));
1588 field->ignored = false;
1589
1590 for (n = 0; n < count; n++) {
1591
1592 value[n] = min < 0 ?
1593 snto32(hid_field_extract(hid, data, offset + n * size,
1594 size), size) :
1595 hid_field_extract(hid, data, offset + n * size, size);
1596
1597 /* Ignore report if ErrorRollOver */
1598 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1599 hid_array_value_is_valid(field, value[n]) &&
1600 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1601 field->ignored = true;
1602 return;
1603 }
1604 }
1605}
1606
1607/*
1608 * Process a received variable field.
1609 */
1610
1611static void hid_input_var_field(struct hid_device *hid,
1612 struct hid_field *field,
1613 int interrupt)
1614{
1615 unsigned int count = field->report_count;
1616 __s32 *value = field->new_value;
1617 unsigned int n;
1618
1619 for (n = 0; n < count; n++)
1620 hid_process_event(hid,
1621 field,
1622 &field->usage[n],
1623 value[n],
1624 interrupt);
1625
1626 memcpy(field->value, value, count * sizeof(__s32));
1627}
1628
1629/*
1630 * Process a received array field. The field content is stored for
1631 * next report processing (we do differential reporting to the layer).
1632 */
1633
1634static void hid_input_array_field(struct hid_device *hid,
1635 struct hid_field *field,
1636 int interrupt)
1637{
1638 unsigned int n;
1639 unsigned int count = field->report_count;
1640 __s32 min = field->logical_minimum;
1641 __s32 *value;
1642
1643 value = field->new_value;
1644
1645 /* ErrorRollOver */
1646 if (field->ignored)
1647 return;
1648
1649 for (n = 0; n < count; n++) {
1650 if (hid_array_value_is_valid(field, field->value[n]) &&
1651 search(value, field->value[n], count))
1652 hid_process_event(hid,
1653 field,
1654 &field->usage[field->value[n] - min],
1655 0,
1656 interrupt);
1657
1658 if (hid_array_value_is_valid(field, value[n]) &&
1659 search(field->value, value[n], count))
1660 hid_process_event(hid,
1661 field,
1662 &field->usage[value[n] - min],
1663 1,
1664 interrupt);
1665 }
1666
1667 memcpy(field->value, value, count * sizeof(__s32));
1668}
1669
1670/*
1671 * Analyse a received report, and fetch the data from it. The field
1672 * content is stored for next report processing (we do differential
1673 * reporting to the layer).
1674 */
1675static void hid_process_report(struct hid_device *hid,
1676 struct hid_report *report,
1677 __u8 *data,
1678 int interrupt)
1679{
1680 unsigned int a;
1681 struct hid_field_entry *entry;
1682 struct hid_field *field;
1683
1684 /* first retrieve all incoming values in data */
1685 for (a = 0; a < report->maxfield; a++)
1686 hid_input_fetch_field(hid, report->field[a], data);
1687
1688 if (!list_empty(&report->field_entry_list)) {
1689 /* INPUT_REPORT, we have a priority list of fields */
1690 list_for_each_entry(entry,
1691 &report->field_entry_list,
1692 list) {
1693 field = entry->field;
1694
1695 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1696 hid_process_event(hid,
1697 field,
1698 &field->usage[entry->index],
1699 field->new_value[entry->index],
1700 interrupt);
1701 else
1702 hid_input_array_field(hid, field, interrupt);
1703 }
1704
1705 /* we need to do the memcpy at the end for var items */
1706 for (a = 0; a < report->maxfield; a++) {
1707 field = report->field[a];
1708
1709 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1710 memcpy(field->value, field->new_value,
1711 field->report_count * sizeof(__s32));
1712 }
1713 } else {
1714 /* FEATURE_REPORT, regular processing */
1715 for (a = 0; a < report->maxfield; a++) {
1716 field = report->field[a];
1717
1718 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1719 hid_input_var_field(hid, field, interrupt);
1720 else
1721 hid_input_array_field(hid, field, interrupt);
1722 }
1723 }
1724}
1725
1726/*
1727 * Insert a given usage_index in a field in the list
1728 * of processed usages in the report.
1729 *
1730 * The elements with the highest priority score are
1731 * processed first.
1732 */
1733static void __hid_insert_field_entry(struct hid_device *hid,
1734 struct hid_report *report,
1735 struct hid_field_entry *entry,
1736 struct hid_field *field,
1737 unsigned int usage_index)
1738{
1739 struct hid_field_entry *next;
1740
1741 entry->field = field;
1742 entry->index = usage_index;
1743 entry->priority = field->usages_priorities[usage_index];
1744
1745 /* insert the element at the correct position */
1746 list_for_each_entry(next,
1747 &report->field_entry_list,
1748 list) {
1749 /*
1750 * the priority of our element is strictly higher
1751 * than the next one, insert it before
1752 */
1753 if (entry->priority > next->priority) {
1754 list_add_tail(&entry->list, &next->list);
1755 return;
1756 }
1757 }
1758
1759 /* lowest priority score: insert at the end */
1760 list_add_tail(&entry->list, &report->field_entry_list);
1761}
1762
1763static void hid_report_process_ordering(struct hid_device *hid,
1764 struct hid_report *report)
1765{
1766 struct hid_field *field;
1767 struct hid_field_entry *entries;
1768 unsigned int a, u, usages;
1769 unsigned int count = 0;
1770
1771 /* count the number of individual fields in the report */
1772 for (a = 0; a < report->maxfield; a++) {
1773 field = report->field[a];
1774
1775 if (field->flags & HID_MAIN_ITEM_VARIABLE)
1776 count += field->report_count;
1777 else
1778 count++;
1779 }
1780
1781 /* allocate the memory to process the fields */
1782 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1783 if (!entries)
1784 return;
1785
1786 report->field_entries = entries;
1787
1788 /*
1789 * walk through all fields in the report and
1790 * store them by priority order in report->field_entry_list
1791 *
1792 * - Var elements are individualized (field + usage_index)
1793 * - Arrays are taken as one, we cannot choose an order for them
1794 */
1795 usages = 0;
1796 for (a = 0; a < report->maxfield; a++) {
1797 field = report->field[a];
1798
1799 if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1800 for (u = 0; u < field->report_count; u++) {
1801 __hid_insert_field_entry(hid, report,
1802 &entries[usages],
1803 field, u);
1804 usages++;
1805 }
1806 } else {
1807 __hid_insert_field_entry(hid, report, &entries[usages],
1808 field, 0);
1809 usages++;
1810 }
1811 }
1812}
1813
1814static void hid_process_ordering(struct hid_device *hid)
1815{
1816 struct hid_report *report;
1817 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1818
1819 list_for_each_entry(report, &report_enum->report_list, list)
1820 hid_report_process_ordering(hid, report);
1821}
1822
1823/*
1824 * Output the field into the report.
1825 */
1826
1827static void hid_output_field(const struct hid_device *hid,
1828 struct hid_field *field, __u8 *data)
1829{
1830 unsigned count = field->report_count;
1831 unsigned offset = field->report_offset;
1832 unsigned size = field->report_size;
1833 unsigned n;
1834
1835 for (n = 0; n < count; n++) {
1836 if (field->logical_minimum < 0) /* signed values */
1837 implement(hid, data, offset + n * size, size,
1838 s32ton(field->value[n], size));
1839 else /* unsigned values */
1840 implement(hid, data, offset + n * size, size,
1841 field->value[n]);
1842 }
1843}
1844
1845/*
1846 * Compute the size of a report.
1847 */
1848static size_t hid_compute_report_size(struct hid_report *report)
1849{
1850 if (report->size)
1851 return ((report->size - 1) >> 3) + 1;
1852
1853 return 0;
1854}
1855
1856/*
1857 * Create a report. 'data' has to be allocated using
1858 * hid_alloc_report_buf() so that it has proper size.
1859 */
1860
1861void hid_output_report(struct hid_report *report, __u8 *data)
1862{
1863 unsigned n;
1864
1865 if (report->id > 0)
1866 *data++ = report->id;
1867
1868 memset(data, 0, hid_compute_report_size(report));
1869 for (n = 0; n < report->maxfield; n++)
1870 hid_output_field(report->device, report->field[n], data);
1871}
1872EXPORT_SYMBOL_GPL(hid_output_report);
1873
1874/*
1875 * Allocator for buffer that is going to be passed to hid_output_report()
1876 */
1877u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1878{
1879 /*
1880 * 7 extra bytes are necessary to achieve proper functionality
1881 * of implement() working on 8 byte chunks
1882 */
1883
1884 u32 len = hid_report_len(report) + 7;
1885
1886 return kzalloc(len, flags);
1887}
1888EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
1889
1890/*
1891 * Set a field value. The report this field belongs to has to be
1892 * built and transferred to the device afterwards for the new value
1893 * to take effect.
1894 */
1895
1896int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1897{
1898 unsigned size;
1899
1900 if (!field)
1901 return -1;
1902
1903 size = field->report_size;
1904
1905 hid_dump_input(field->report->device, field->usage + offset, value);
1906
1907 if (offset >= field->report_count) {
1908 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1909 offset, field->report_count);
1910 return -1;
1911 }
1912 if (field->logical_minimum < 0) {
1913 if (value != snto32(s32ton(value, size), size)) {
1914 hid_err(field->report->device, "value %d is out of range\n", value);
1915 return -1;
1916 }
1917 }
1918 field->value[offset] = value;
1919 return 0;
1920}
1921EXPORT_SYMBOL_GPL(hid_set_field);
1922
1923struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1924 unsigned int application, unsigned int usage)
1925{
1926 struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1927 struct hid_report *report;
1928 int i, j;
1929
1930 list_for_each_entry(report, report_list, list) {
1931 if (report->application != application)
1932 continue;
1933
1934 for (i = 0; i < report->maxfield; i++) {
1935 struct hid_field *field = report->field[i];
1936
1937 for (j = 0; j < field->maxusage; j++) {
1938 if (field->usage[j].hid == usage)
1939 return field;
1940 }
1941 }
1942 }
1943
1944 return NULL;
1945}
1946EXPORT_SYMBOL_GPL(hid_find_field);
1947
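/*
 * Example (hypothetical driver code, not part of this file): looking up an
 * output field by usage and updating it; the application and usage values
 * below (keyboard application, Caps Lock LED) are only illustrative:
 *
 *	struct hid_field *field;
 *
 *	field = hid_find_field(hdev, HID_OUTPUT_REPORT,
 *			       HID_GD_KEYBOARD, HID_UP_LED | 0x02);
 *	if (!field)
 *		return -ENODEV;
 *
 *	hid_set_field(field, 0, 1);
 *	hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT);
 */
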
1948static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1949 const u8 *data)
1950{
1951 struct hid_report *report;
1952 unsigned int n = 0; /* Normally report number is 0 */
1953
1954 /* Device uses numbered reports, data[0] is report number */
1955 if (report_enum->numbered)
1956 n = *data;
1957
1958 report = report_enum->report_id_hash[n];
1959 if (report == NULL)
1960 dbg_hid("undefined report_id %u received\n", n);
1961
1962 return report;
1963}
1964
1965/*
1966 * Implement a generic .request() callback, using .raw_request().
1967 * DO NOT USE in hid drivers directly; go through hid_hw_request() instead.
1968 */
1969int __hid_request(struct hid_device *hid, struct hid_report *report,
1970 enum hid_class_request reqtype)
1971{
1972 char *buf;
1973 int ret;
1974 u32 len;
1975
1976 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1977 if (!buf)
1978 return -ENOMEM;
1979
1980 len = hid_report_len(report);
1981
1982 if (reqtype == HID_REQ_SET_REPORT)
1983 hid_output_report(report, buf);
1984
1985 ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
1986 report->type, reqtype);
1987 if (ret < 0) {
1988 dbg_hid("unable to complete request: %d\n", ret);
1989 goto out;
1990 }
1991
1992 if (reqtype == HID_REQ_GET_REPORT)
1993 hid_input_report(hid, report->type, buf, ret, 0);
1994
1995 ret = 0;
1996
1997out:
1998 kfree(buf);
1999 return ret;
2000}
2001EXPORT_SYMBOL_GPL(__hid_request);
2002
2003int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2004 int interrupt)
2005{
2006 struct hid_report_enum *report_enum = hid->report_enum + type;
2007 struct hid_report *report;
2008 struct hid_driver *hdrv;
2009 int max_buffer_size = HID_MAX_BUFFER_SIZE;
2010 u32 rsize, csize = size;
2011 u8 *cdata = data;
2012 int ret = 0;
2013
2014 report = hid_get_report(report_enum, data);
2015 if (!report)
2016 goto out;
2017
2018 if (report_enum->numbered) {
2019 cdata++;
2020 csize--;
2021 }
2022
2023 rsize = hid_compute_report_size(report);
2024
2025 if (hid->ll_driver->max_buffer_size)
2026 max_buffer_size = hid->ll_driver->max_buffer_size;
2027
2028 if (report_enum->numbered && rsize >= max_buffer_size)
2029 rsize = max_buffer_size - 1;
2030 else if (rsize > max_buffer_size)
2031 rsize = max_buffer_size;
2032
2033 if (csize < rsize) {
2034 dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2035 csize, rsize);
2036 memset(cdata + csize, 0, rsize - csize);
2037 }
2038
2039 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2040 hid->hiddev_report_event(hid, report);
2041 if (hid->claimed & HID_CLAIMED_HIDRAW) {
2042 ret = hidraw_report_event(hid, data, size);
2043 if (ret)
2044 goto out;
2045 }
2046
2047 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2048 hid_process_report(hid, report, cdata, interrupt);
2049 hdrv = hid->driver;
2050 if (hdrv && hdrv->report)
2051 hdrv->report(hid, report);
2052 }
2053
2054 if (hid->claimed & HID_CLAIMED_INPUT)
2055 hidinput_report_event(hid, report);
2056out:
2057 return ret;
2058}
2059EXPORT_SYMBOL_GPL(hid_report_raw_event);
2060
2061
2062static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2063 u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2064 bool lock_already_taken)
2065{
2066 struct hid_report_enum *report_enum;
2067 struct hid_driver *hdrv;
2068 struct hid_report *report;
2069 int ret = 0;
2070
2071 if (!hid)
2072 return -ENODEV;
2073
2074 ret = down_trylock(&hid->driver_input_lock);
2075 if (lock_already_taken && !ret) {
2076 up(&hid->driver_input_lock);
2077 return -EINVAL;
2078 } else if (!lock_already_taken && ret) {
2079 return -EBUSY;
2080 }
2081
2082 if (!hid->driver) {
2083 ret = -ENODEV;
2084 goto unlock;
2085 }
2086 report_enum = hid->report_enum + type;
2087 hdrv = hid->driver;
2088
2089 data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2090 if (IS_ERR(data)) {
2091 ret = PTR_ERR(data);
2092 goto unlock;
2093 }
2094
2095 if (!size) {
2096 dbg_hid("empty report\n");
2097 ret = -1;
2098 goto unlock;
2099 }
2100
2101 /* Avoid unnecessary overhead if debugfs is disabled */
2102 if (!list_empty(&hid->debug_list))
2103 hid_dump_report(hid, type, data, size);
2104
2105 report = hid_get_report(report_enum, data);
2106
2107 if (!report) {
2108 ret = -1;
2109 goto unlock;
2110 }
2111
2112 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2113 ret = hdrv->raw_event(hid, report, data, size);
2114 if (ret < 0)
2115 goto unlock;
2116 }
2117
2118 ret = hid_report_raw_event(hid, type, data, size, interrupt);
2119
2120unlock:
2121 if (!lock_already_taken)
2122 up(&hid->driver_input_lock);
2123 return ret;
2124}
2125
2126/**
2127 * hid_input_report - report data from lower layer (usb, bt...)
2128 *
2129 * @hid: hid device
2130 * @type: HID report type (HID_*_REPORT)
2131 * @data: report contents
2132 * @size: size of data parameter
2133 * @interrupt: distinguish between interrupt and control transfers
2134 *
2135 * This is data entry for lower layers.
2136 */
2137int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2138 int interrupt)
2139{
2140 return __hid_input_report(hid, type, data, size, interrupt, 0,
2141 false, /* from_bpf */
2142 false /* lock_already_taken */);
2143}
2144EXPORT_SYMBOL_GPL(hid_input_report);
2145
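/*
 * Example (hypothetical transport driver code, not part of this file): a
 * lower layer forwards its interrupt data like this ("xyz" names are
 * assumed):
 *
 *	static void xyz_irq_handler(struct xyz_dev *xyz, u8 *data, u32 len)
 *	{
 *		hid_input_report(xyz->hid, HID_INPUT_REPORT, data, len, 1);
 *	}
 */
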
2146bool hid_match_one_id(const struct hid_device *hdev,
2147 const struct hid_device_id *id)
2148{
2149 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2150 (id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2151 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2152 (id->product == HID_ANY_ID || id->product == hdev->product);
2153}
2154
2155const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2156 const struct hid_device_id *id)
2157{
2158 for (; id->bus; id++)
2159 if (hid_match_one_id(hdev, id))
2160 return id;
2161
2162 return NULL;
2163}
2164EXPORT_SYMBOL_GPL(hid_match_id);
2165
2166static const struct hid_device_id hid_hiddev_list[] = {
2167 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2168 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2169 { }
2170};
2171
2172static bool hid_hiddev(struct hid_device *hdev)
2173{
2174 return !!hid_match_id(hdev, hid_hiddev_list);
2175}
2176
2177
2178static ssize_t
2179read_report_descriptor(struct file *filp, struct kobject *kobj,
2180 struct bin_attribute *attr,
2181 char *buf, loff_t off, size_t count)
2182{
2183 struct device *dev = kobj_to_dev(kobj);
2184 struct hid_device *hdev = to_hid_device(dev);
2185
2186 if (off >= hdev->rsize)
2187 return 0;
2188
2189 if (off + count > hdev->rsize)
2190 count = hdev->rsize - off;
2191
2192 memcpy(buf, hdev->rdesc + off, count);
2193
2194 return count;
2195}
2196
2197static ssize_t
2198show_country(struct device *dev, struct device_attribute *attr,
2199 char *buf)
2200{
2201 struct hid_device *hdev = to_hid_device(dev);
2202
2203 return sprintf(buf, "%02x\n", hdev->country & 0xff);
2204}
2205
2206static struct bin_attribute dev_bin_attr_report_desc = {
2207 .attr = { .name = "report_descriptor", .mode = 0444 },
2208 .read = read_report_descriptor,
2209 .size = HID_MAX_DESCRIPTOR_SIZE,
2210};
2211
2212static const struct device_attribute dev_attr_country = {
2213 .attr = { .name = "country", .mode = 0444 },
2214 .show = show_country,
2215};
2216
2217int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2218{
2219 static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2220 "Joystick", "Gamepad", "Keyboard", "Keypad",
2221 "Multi-Axis Controller"
2222 };
2223 const char *type, *bus;
2224 char buf[64] = "";
2225 unsigned int i;
2226 int len;
2227 int ret;
2228
2229 ret = hid_bpf_connect_device(hdev);
2230 if (ret)
2231 return ret;
2232
2233 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2234 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2235 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2236 connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2237 if (hdev->bus != BUS_USB)
2238 connect_mask &= ~HID_CONNECT_HIDDEV;
2239 if (hid_hiddev(hdev))
2240 connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2241
2242 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2243 connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2244 hdev->claimed |= HID_CLAIMED_INPUT;
2245
2246 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2247 !hdev->hiddev_connect(hdev,
2248 connect_mask & HID_CONNECT_HIDDEV_FORCE))
2249 hdev->claimed |= HID_CLAIMED_HIDDEV;
2250 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2251 hdev->claimed |= HID_CLAIMED_HIDRAW;
2252
2253 if (connect_mask & HID_CONNECT_DRIVER)
2254 hdev->claimed |= HID_CLAIMED_DRIVER;
2255
2256 /* Drivers with the ->raw_event callback set are not required to connect
2257 * to any other listener. */
2258 if (!hdev->claimed && !hdev->driver->raw_event) {
2259 hid_err(hdev, "device has no listeners, quitting\n");
2260 return -ENODEV;
2261 }
2262
2263 hid_process_ordering(hdev);
2264
2265 if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2266 (connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2267 hdev->ff_init(hdev);
2268
2269 len = 0;
2270 if (hdev->claimed & HID_CLAIMED_INPUT)
2271 len += sprintf(buf + len, "input");
2272 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2273 len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2274 ((struct hiddev *)hdev->hiddev)->minor);
2275 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2276 len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2277 ((struct hidraw *)hdev->hidraw)->minor);
2278
2279 type = "Device";
2280 for (i = 0; i < hdev->maxcollection; i++) {
2281 struct hid_collection *col = &hdev->collection[i];
2282 if (col->type == HID_COLLECTION_APPLICATION &&
2283 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2284 (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2285 type = types[col->usage & 0xffff];
2286 break;
2287 }
2288 }
2289
2290 switch (hdev->bus) {
2291 case BUS_USB:
2292 bus = "USB";
2293 break;
2294 case BUS_BLUETOOTH:
2295 bus = "BLUETOOTH";
2296 break;
2297 case BUS_I2C:
2298 bus = "I2C";
2299 break;
2300 case BUS_VIRTUAL:
2301 bus = "VIRTUAL";
2302 break;
2303 case BUS_INTEL_ISHTP:
2304 case BUS_AMD_SFH:
2305 bus = "SENSOR HUB";
2306 break;
2307 default:
2308 bus = "<UNKNOWN>";
2309 }
2310
2311 ret = device_create_file(&hdev->dev, &dev_attr_country);
2312 if (ret)
2313 hid_warn(hdev,
2314 "can't create sysfs country code attribute err: %d\n", ret);
2315
2316 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2317 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2318 type, hdev->name, hdev->phys);
2319
2320 return 0;
2321}
2322EXPORT_SYMBOL_GPL(hid_connect);
2323
2324void hid_disconnect(struct hid_device *hdev)
2325{
2326 device_remove_file(&hdev->dev, &dev_attr_country);
2327 if (hdev->claimed & HID_CLAIMED_INPUT)
2328 hidinput_disconnect(hdev);
2329 if (hdev->claimed & HID_CLAIMED_HIDDEV)
2330 hdev->hiddev_disconnect(hdev);
2331 if (hdev->claimed & HID_CLAIMED_HIDRAW)
2332 hidraw_disconnect(hdev);
2333 hdev->claimed = 0;
2334
2335 hid_bpf_disconnect_device(hdev);
2336}
2337EXPORT_SYMBOL_GPL(hid_disconnect);
2338
2339/**
2340 * hid_hw_start - start underlying HW
2341 * @hdev: hid device
2342 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2343 *
2344 * Call this in probe function *after* hid_parse. This will set up HW
2345 * buffers and start the device (if not deferred to device open).
2346 * hid_hw_stop must be called if this was successful.
2347 */
2348int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2349{
2350 int error;
2351
2352 error = hdev->ll_driver->start(hdev);
2353 if (error)
2354 return error;
2355
2356 if (connect_mask) {
2357 error = hid_connect(hdev, connect_mask);
2358 if (error) {
2359 hdev->ll_driver->stop(hdev);
2360 return error;
2361 }
2362 }
2363
2364 return 0;
2365}
2366EXPORT_SYMBOL_GPL(hid_hw_start);
2367
2368/**
2369 * hid_hw_stop - stop underlying HW
2370 * @hdev: hid device
2371 *
2372 * This is usually called from remove function or from probe when something
2373 * failed and hid_hw_start was called already.
2374 */
2375void hid_hw_stop(struct hid_device *hdev)
2376{
2377 hid_disconnect(hdev);
2378 hdev->ll_driver->stop(hdev);
2379}
2380EXPORT_SYMBOL_GPL(hid_hw_stop);
2381
2382/**
2383 * hid_hw_open - signal underlying HW to start delivering events
2384 * @hdev: hid device
2385 *
2386 * Tell underlying HW to start delivering events from the device.
2387 * This function should be called sometime after a successful call
2388 * to hid_hw_start().
2389 */
2390int hid_hw_open(struct hid_device *hdev)
2391{
2392 int ret;
2393
2394 ret = mutex_lock_killable(&hdev->ll_open_lock);
2395 if (ret)
2396 return ret;
2397
2398 if (!hdev->ll_open_count++) {
2399 ret = hdev->ll_driver->open(hdev);
2400 if (ret)
2401 hdev->ll_open_count--;
2402 }
2403
2404 mutex_unlock(&hdev->ll_open_lock);
2405 return ret;
2406}
2407EXPORT_SYMBOL_GPL(hid_hw_open);
2408
2409/**
2410 * hid_hw_close - signal underlying HW to stop delivering events
2411 *
2412 * @hdev: hid device
2413 *
2414 * This function indicates that we are not interested in the events
2415 * from this device anymore. Delivery of events may or may not stop,
2416 * depending on the number of users still outstanding.
2417 */
2418void hid_hw_close(struct hid_device *hdev)
2419{
2420 mutex_lock(&hdev->ll_open_lock);
2421 if (!--hdev->ll_open_count)
2422 hdev->ll_driver->close(hdev);
2423 mutex_unlock(&hdev->ll_open_lock);
2424}
2425EXPORT_SYMBOL_GPL(hid_hw_close);
2426
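/*
 * Example (sketch, not part of this file): hid_hw_open()/hid_hw_close() are
 * typically wired to an input device's open/close callbacks, so the
 * transport only streams events while somebody is listening (this mirrors
 * what hid-input does for its own input devices):
 *
 *	static int xyz_input_open(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		return hid_hw_open(hdev);
 *	}
 *
 *	static void xyz_input_close(struct input_dev *dev)
 *	{
 *		struct hid_device *hdev = input_get_drvdata(dev);
 *
 *		hid_hw_close(hdev);
 *	}
 */
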
2427/**
2428 * hid_hw_request - send report request to device
2429 *
2430 * @hdev: hid device
2431 * @report: report to send
2432 * @reqtype: hid request type
2433 */
2434void hid_hw_request(struct hid_device *hdev,
2435 struct hid_report *report, enum hid_class_request reqtype)
2436{
2437 if (hdev->ll_driver->request)
2438 return hdev->ll_driver->request(hdev, report, reqtype);
2439
2440 __hid_request(hdev, report, reqtype);
2441}
2442EXPORT_SYMBOL_GPL(hid_hw_request);
2443
2444int __hid_hw_raw_request(struct hid_device *hdev,
2445 unsigned char reportnum, __u8 *buf,
2446 size_t len, enum hid_report_type rtype,
2447 enum hid_class_request reqtype,
2448 u64 source, bool from_bpf)
2449{
2450 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2451 int ret;
2452
2453 if (hdev->ll_driver->max_buffer_size)
2454 max_buffer_size = hdev->ll_driver->max_buffer_size;
2455
2456 if (len < 1 || len > max_buffer_size || !buf)
2457 return -EINVAL;
2458
2459 ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2460 reqtype, source, from_bpf);
2461 if (ret)
2462 return ret;
2463
2464 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2465 rtype, reqtype);
2466}
2467
2468/**
2469 * hid_hw_raw_request - send report request to device
2470 *
2471 * @hdev: hid device
2472 * @reportnum: report ID
2473 * @buf: in/out data to transfer
2474 * @len: length of buf
2475 * @rtype: HID report type
2476 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2477 *
2478 * Return: count of data transferred, negative if error
2479 *
2480 * Same behavior as hid_hw_request, but with raw buffers instead.
2481 */
2482int hid_hw_raw_request(struct hid_device *hdev,
2483 unsigned char reportnum, __u8 *buf,
2484 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2485{
2486 return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2487}
2488EXPORT_SYMBOL_GPL(hid_hw_raw_request);
2489
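/*
 * Example (hypothetical, not part of this file): fetching a feature report.
 * The buffer must be heap-allocated, since some transports DMA straight
 * into it; the report id and length are illustrative:
 *
 *	u8 *buf = kzalloc(8, GFP_KERNEL);
 *	int ret;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	ret = hid_hw_raw_request(hdev, 0x05, buf, 8,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	// on success, ret is the number of bytes received in buf
 *	kfree(buf);
 */
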
2490int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2491 bool from_bpf)
2492{
2493 unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2494 int ret;
2495
2496 if (hdev->ll_driver->max_buffer_size)
2497 max_buffer_size = hdev->ll_driver->max_buffer_size;
2498
2499 if (len < 1 || len > max_buffer_size || !buf)
2500 return -EINVAL;
2501
2502 ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2503 if (ret)
2504 return ret;
2505
2506 if (hdev->ll_driver->output_report)
2507 return hdev->ll_driver->output_report(hdev, buf, len);
2508
2509 return -ENOSYS;
2510}
2511
2512/**
2513 * hid_hw_output_report - send output report to device
2514 *
2515 * @hdev: hid device
2516 * @buf: raw data to transfer
2517 * @len: length of buf
2518 *
2519 * Return: count of data transferred, negative if error
2520 */
2521int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2522{
2523 return __hid_hw_output_report(hdev, buf, len, 0, false);
2524}
2525EXPORT_SYMBOL_GPL(hid_hw_output_report);
2526
2527#ifdef CONFIG_PM
2528int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2529{
2530 if (hdev->driver && hdev->driver->suspend)
2531 return hdev->driver->suspend(hdev, state);
2532
2533 return 0;
2534}
2535EXPORT_SYMBOL_GPL(hid_driver_suspend);
2536
2537int hid_driver_reset_resume(struct hid_device *hdev)
2538{
2539 if (hdev->driver && hdev->driver->reset_resume)
2540 return hdev->driver->reset_resume(hdev);
2541
2542 return 0;
2543}
2544EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2545
2546int hid_driver_resume(struct hid_device *hdev)
2547{
2548 if (hdev->driver && hdev->driver->resume)
2549 return hdev->driver->resume(hdev);
2550
2551 return 0;
2552}
2553EXPORT_SYMBOL_GPL(hid_driver_resume);
2554#endif /* CONFIG_PM */
2555
2556struct hid_dynid {
2557 struct list_head list;
2558 struct hid_device_id id;
2559};
2560
2561/**
2562 * new_id_store - add a new HID device ID to this driver and re-probe devices
2563 * @drv: target device driver
2564 * @buf: buffer for scanning device ID data
2565 * @count: input size
2566 *
2567 * Adds a new dynamic hid device ID to this driver,
2568 * and causes the driver to probe for all devices again.
2569 */
2570static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2571 size_t count)
2572{
2573 struct hid_driver *hdrv = to_hid_driver(drv);
2574 struct hid_dynid *dynid;
2575 __u32 bus, vendor, product;
2576 unsigned long driver_data = 0;
2577 int ret;
2578
2579 ret = sscanf(buf, "%x %x %x %lx",
2580 &bus, &vendor, &product, &driver_data);
2581 if (ret < 3)
2582 return -EINVAL;
2583
2584 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2585 if (!dynid)
2586 return -ENOMEM;
2587
2588 dynid->id.bus = bus;
2589 dynid->id.group = HID_GROUP_ANY;
2590 dynid->id.vendor = vendor;
2591 dynid->id.product = product;
2592 dynid->id.driver_data = driver_data;
2593
2594 spin_lock(&hdrv->dyn_lock);
2595 list_add_tail(&dynid->list, &hdrv->dyn_list);
2596 spin_unlock(&hdrv->dyn_lock);
2597
2598 ret = driver_attach(&hdrv->driver);
2599
2600 return ret ? : count;
2601}
2602static DRIVER_ATTR_WO(new_id);
2603
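/*
 * Example (illustrative only): from userspace, an extra device id can be
 * bound to an already loaded driver by writing "bus vendor product
 * [driver_data]" in hex to the driver's new_id attribute, e.g.:
 *
 *	echo "0003 046d c52b" > /sys/bus/hid/drivers/<driver>/new_id
 *
 * (0003 being BUS_USB; the vendor/product ids here are just an example.)
 */
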
2604static struct attribute *hid_drv_attrs[] = {
2605 &driver_attr_new_id.attr,
2606 NULL,
2607};
2608ATTRIBUTE_GROUPS(hid_drv);
2609
2610static void hid_free_dynids(struct hid_driver *hdrv)
2611{
2612 struct hid_dynid *dynid, *n;
2613
2614 spin_lock(&hdrv->dyn_lock);
2615 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2616 list_del(&dynid->list);
2617 kfree(dynid);
2618 }
2619 spin_unlock(&hdrv->dyn_lock);
2620}
2621
2622const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2623 struct hid_driver *hdrv)
2624{
2625 struct hid_dynid *dynid;
2626
2627 spin_lock(&hdrv->dyn_lock);
2628 list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2629 if (hid_match_one_id(hdev, &dynid->id)) {
2630 spin_unlock(&hdrv->dyn_lock);
2631 return &dynid->id;
2632 }
2633 }
2634 spin_unlock(&hdrv->dyn_lock);
2635
2636 return hid_match_id(hdev, hdrv->id_table);
2637}
2638EXPORT_SYMBOL_GPL(hid_match_device);
2639
2640static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2641{
2642 struct hid_driver *hdrv = to_hid_driver(drv);
2643 struct hid_device *hdev = to_hid_device(dev);
2644
2645 return hid_match_device(hdev, hdrv) != NULL;
2646}
2647
2648/**
2649 * hid_compare_device_paths - check if both devices share the same path
2650 * @hdev_a: hid device
2651 * @hdev_b: hid device
2652 * @separator: char to use as separator
2653 *
2654 * Check if two devices share the same path up to the last occurrence of
2655 * the separator char. Both paths must exist (i.e., zero-length paths
2656 * don't match).
2657 */
2658bool hid_compare_device_paths(struct hid_device *hdev_a,
2659 struct hid_device *hdev_b, char separator)
2660{
2661 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2662 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2663
2664 if (n1 != n2 || n1 <= 0 || n2 <= 0)
2665 return false;
2666
2667 return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2668}
2669EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2670
2671static bool hid_check_device_match(struct hid_device *hdev,
2672 struct hid_driver *hdrv,
2673 const struct hid_device_id **id)
2674{
2675 *id = hid_match_device(hdev, hdrv);
2676 if (!*id)
2677 return false;
2678
2679 if (hdrv->match)
2680 return hdrv->match(hdev, hid_ignore_special_drivers);
2681
2682 /*
2683 * hid-generic implements .match(), so we must be dealing with a
2684 * different HID driver here, and can simply check if
2685 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2686 * are set or not.
2687 */
2688 return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2689}
2690
2691static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2692{
2693 const struct hid_device_id *id;
2694 int ret;
2695
2696 if (!hdev->bpf_rsize) {
2697 /* in case a bpf program gets detached, we need to free the old one */
2698 hid_free_bpf_rdesc(hdev);
2699
2700 /* keep this around so we know we called it once */
2701 hdev->bpf_rsize = hdev->dev_rsize;
2702
2703 /* call_hid_bpf_rdesc_fixup will always return a valid pointer */
2704 hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
2705 &hdev->bpf_rsize);
2706 }
2707
2708 if (!hid_check_device_match(hdev, hdrv, &id))
2709 return -ENODEV;
2710
2711 hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2712 if (!hdev->devres_group_id)
2713 return -ENOMEM;
2714
2715	/* reset the quirks that have been previously set */
2716 hdev->quirks = hid_lookup_quirk(hdev);
2717 hdev->driver = hdrv;
2718
2719 if (hdrv->probe) {
2720 ret = hdrv->probe(hdev, id);
2721 } else { /* default probe */
2722 ret = hid_open_report(hdev);
2723 if (!ret)
2724 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2725 }
2726
2727 /*
2728 * Note that we are not closing the devres group opened above so
2729 * even resources that were attached to the device after probe is
2730 * run are released when hid_device_remove() is executed. This is
2731 * needed as some drivers would allocate additional resources,
2732 * for example when updating firmware.
2733 */
2734
2735 if (ret) {
2736 devres_release_group(&hdev->dev, hdev->devres_group_id);
2737 hid_close_report(hdev);
2738 hdev->driver = NULL;
2739 }
2740
2741 return ret;
2742}
2743
2744static int hid_device_probe(struct device *dev)
2745{
2746 struct hid_device *hdev = to_hid_device(dev);
2747 struct hid_driver *hdrv = to_hid_driver(dev->driver);
2748 int ret = 0;
2749
2750 if (down_interruptible(&hdev->driver_input_lock))
2751 return -EINTR;
2752
2753 hdev->io_started = false;
2754 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2755
2756 if (!hdev->driver)
2757 ret = __hid_device_probe(hdev, hdrv);
2758
2759 if (!hdev->io_started)
2760 up(&hdev->driver_input_lock);
2761
2762 return ret;
2763}
2764
2765static void hid_device_remove(struct device *dev)
2766{
2767 struct hid_device *hdev = to_hid_device(dev);
2768 struct hid_driver *hdrv;
2769
2770 down(&hdev->driver_input_lock);
2771 hdev->io_started = false;
2772
2773 hdrv = hdev->driver;
2774 if (hdrv) {
2775 if (hdrv->remove)
2776 hdrv->remove(hdev);
2777 else /* default remove */
2778 hid_hw_stop(hdev);
2779
2780 /* Release all devres resources allocated by the driver */
2781 devres_release_group(&hdev->dev, hdev->devres_group_id);
2782
2783 hid_close_report(hdev);
2784 hdev->driver = NULL;
2785 }
2786
2787 if (!hdev->io_started)
2788 up(&hdev->driver_input_lock);
2789}
2790
2791static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2792 char *buf)
2793{
2794 struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2795
2796 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2797 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2798}
2799static DEVICE_ATTR_RO(modalias);
2800
2801static struct attribute *hid_dev_attrs[] = {
2802 &dev_attr_modalias.attr,
2803 NULL,
2804};
2805static struct bin_attribute *hid_dev_bin_attrs[] = {
2806 &dev_bin_attr_report_desc,
2807 NULL
2808};
2809static const struct attribute_group hid_dev_group = {
2810 .attrs = hid_dev_attrs,
2811 .bin_attrs = hid_dev_bin_attrs,
2812};
2813__ATTRIBUTE_GROUPS(hid_dev);
2814
2815static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2816{
2817 const struct hid_device *hdev = to_hid_device(dev);
2818
2819 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2820 hdev->bus, hdev->vendor, hdev->product))
2821 return -ENOMEM;
2822
2823 if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2824 return -ENOMEM;
2825
2826 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2827 return -ENOMEM;
2828
2829 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2830 return -ENOMEM;
2831
2832 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2833 hdev->bus, hdev->group, hdev->vendor, hdev->product))
2834 return -ENOMEM;
2835
2836 return 0;
2837}
2838
2839const struct bus_type hid_bus_type = {
2840 .name = "hid",
2841 .dev_groups = hid_dev_groups,
2842 .drv_groups = hid_drv_groups,
2843 .match = hid_bus_match,
2844 .probe = hid_device_probe,
2845 .remove = hid_device_remove,
2846 .uevent = hid_uevent,
2847};
2848EXPORT_SYMBOL(hid_bus_type);
2849
2850int hid_add_device(struct hid_device *hdev)
2851{
2852 static atomic_t id = ATOMIC_INIT(0);
2853 int ret;
2854
2855 if (WARN_ON(hdev->status & HID_STAT_ADDED))
2856 return -EBUSY;
2857
2858 hdev->quirks = hid_lookup_quirk(hdev);
2859
2860 /* we need to kill them here, otherwise they will stay allocated to
2861	 * wait for a matching driver */
2862 if (hid_ignore(hdev))
2863 return -ENODEV;
2864
2865 /*
2866 * Check for the mandatory transport channel.
2867 */
2868 if (!hdev->ll_driver->raw_request) {
2869 hid_err(hdev, "transport driver missing .raw_request()\n");
2870 return -EINVAL;
2871 }
2872
2873 /*
2874 * Read the device report descriptor once and use it as a template
2875 * for the driver-specific modifications.
2876 */
2877 ret = hdev->ll_driver->parse(hdev);
2878 if (ret)
2879 return ret;
2880 if (!hdev->dev_rdesc)
2881 return -ENODEV;
2882
2883 /*
2884 * Scan generic devices for group information
2885 */
2886 if (hid_ignore_special_drivers) {
2887 hdev->group = HID_GROUP_GENERIC;
2888 } else if (!hdev->group &&
2889 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2890 ret = hid_scan_report(hdev);
2891 if (ret)
2892 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2893 }
2894
2895 hdev->id = atomic_inc_return(&id);
2896
2897 /* XXX hack, any other cleaner solution after the driver core
2898 * is converted to allow more than 20 bytes as the device name? */
2899 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2900 hdev->vendor, hdev->product, hdev->id);
2901
2902 hid_debug_register(hdev, dev_name(&hdev->dev));
2903 ret = device_add(&hdev->dev);
2904 if (!ret)
2905 hdev->status |= HID_STAT_ADDED;
2906 else
2907 hid_debug_unregister(hdev);
2908
2909 return ret;
2910}
2911EXPORT_SYMBOL_GPL(hid_add_device);
2912
2913/**
2914 * hid_allocate_device - allocate new hid device descriptor
2915 *
2916 * Allocate and initialize a hid device, so that hid_destroy_device() can
2917 * be used to free it.
2918 *
2919 * A new hid_device pointer is returned on success, otherwise an ERR_PTR
2920 * encoded error value.
2921 */
2922struct hid_device *hid_allocate_device(void)
2923{
2924 struct hid_device *hdev;
2925 int ret = -ENOMEM;
2926
2927 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2928 if (hdev == NULL)
2929 return ERR_PTR(ret);
2930
2931 device_initialize(&hdev->dev);
2932 hdev->dev.release = hid_device_release;
2933 hdev->dev.bus = &hid_bus_type;
2934 device_enable_async_suspend(&hdev->dev);
2935
2936 hid_close_report(hdev);
2937
2938 init_waitqueue_head(&hdev->debug_wait);
2939 INIT_LIST_HEAD(&hdev->debug_list);
2940 spin_lock_init(&hdev->debug_list_lock);
2941 sema_init(&hdev->driver_input_lock, 1);
2942 mutex_init(&hdev->ll_open_lock);
2943 kref_init(&hdev->ref);
2944
2945 ret = hid_bpf_device_init(hdev);
2946 if (ret)
2947 goto out_err;
2948
2949 return hdev;
2950
2951out_err:
2952 hid_destroy_device(hdev);
2953 return ERR_PTR(ret);
2954}
2955EXPORT_SYMBOL_GPL(hid_allocate_device);
2956
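/*
 * Example (hypothetical transport driver code, not part of this file): the
 * usual allocate/register/destroy sequence ("xyz_ll_driver" and the ids are
 * assumed):
 *
 *	struct hid_device *hdev;
 *	int ret;
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *
 *	hdev->ll_driver = &xyz_ll_driver;
 *	hdev->bus = BUS_VIRTUAL;
 *	hdev->vendor = 0x1234;
 *	hdev->product = 0x5678;
 *
 *	ret = hid_add_device(hdev);
 *	if (ret) {
 *		hid_destroy_device(hdev);
 *		return ret;
 *	}
 */
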
2957static void hid_remove_device(struct hid_device *hdev)
2958{
2959 if (hdev->status & HID_STAT_ADDED) {
2960 device_del(&hdev->dev);
2961 hid_debug_unregister(hdev);
2962 hdev->status &= ~HID_STAT_ADDED;
2963 }
2964 hid_free_bpf_rdesc(hdev);
2965 kfree(hdev->dev_rdesc);
2966 hdev->dev_rdesc = NULL;
2967 hdev->dev_rsize = 0;
2968 hdev->bpf_rsize = 0;
2969}
2970
2971/**
2972 * hid_destroy_device - free previously allocated device
2973 *
2974 * @hdev: hid device
2975 *
2976 * If you allocate a hid_device through hid_allocate_device(), you should
2977 * only ever free it by calling this function.
2978 */
2979void hid_destroy_device(struct hid_device *hdev)
2980{
2981 hid_bpf_destroy_device(hdev);
2982 hid_remove_device(hdev);
2983 put_device(&hdev->dev);
2984}
2985EXPORT_SYMBOL_GPL(hid_destroy_device);
2986
2987
2988static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2989{
2990 struct hid_driver *hdrv = data;
2991 struct hid_device *hdev = to_hid_device(dev);
2992
2993 if (hdev->driver == hdrv &&
2994 !hdrv->match(hdev, hid_ignore_special_drivers) &&
2995 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2996 return device_reprobe(dev);
2997
2998 return 0;
2999}
3000
3001static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3002{
3003 struct hid_driver *hdrv = to_hid_driver(drv);
3004
3005 if (hdrv->match) {
3006 bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3007 __hid_bus_reprobe_drivers);
3008 }
3009
3010 return 0;
3011}
3012
3013static int __bus_removed_driver(struct device_driver *drv, void *data)
3014{
3015 return bus_rescan_devices(&hid_bus_type);
3016}
3017
3018int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3019 const char *mod_name)
3020{
3021 int ret;
3022
3023 hdrv->driver.name = hdrv->name;
3024 hdrv->driver.bus = &hid_bus_type;
3025 hdrv->driver.owner = owner;
3026 hdrv->driver.mod_name = mod_name;
3027
3028 INIT_LIST_HEAD(&hdrv->dyn_list);
3029 spin_lock_init(&hdrv->dyn_lock);
3030
3031 ret = driver_register(&hdrv->driver);
3032
3033 if (ret == 0)
3034 bus_for_each_drv(&hid_bus_type, NULL, NULL,
3035 __hid_bus_driver_added);
3036
3037 return ret;
3038}
3039EXPORT_SYMBOL_GPL(__hid_register_driver);
3040
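/*
 * Example (hypothetical, not part of this file): a minimal HID driver is
 * normally declared with the module_hid_driver() helper, which registers
 * it through this function; the ids below are illustrative:
 *
 *	static const struct hid_device_id xyz_devices[] = {
 *		{ HID_USB_DEVICE(0x1234, 0x5678) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, xyz_devices);
 *
 *	static struct hid_driver xyz_driver = {
 *		.name = "xyz",
 *		.id_table = xyz_devices,
 *	};
 *	module_hid_driver(xyz_driver);
 */
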
3041void hid_unregister_driver(struct hid_driver *hdrv)
3042{
3043 driver_unregister(&hdrv->driver);
3044 hid_free_dynids(hdrv);
3045
3046 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3047}
3048EXPORT_SYMBOL_GPL(hid_unregister_driver);
3049
3050int hid_check_keys_pressed(struct hid_device *hid)
3051{
3052 struct hid_input *hidinput;
3053 int i;
3054
3055 if (!(hid->claimed & HID_CLAIMED_INPUT))
3056 return 0;
3057
3058 list_for_each_entry(hidinput, &hid->inputs, list) {
3059 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3060 if (hidinput->input->key[i])
3061 return 1;
3062 }
3063
3064 return 0;
3065}
3066EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3067
3068#ifdef CONFIG_HID_BPF
3069static const struct hid_ops __hid_ops = {
3070 .hid_get_report = hid_get_report,
3071 .hid_hw_raw_request = __hid_hw_raw_request,
3072 .hid_hw_output_report = __hid_hw_output_report,
3073 .hid_input_report = __hid_input_report,
3074 .owner = THIS_MODULE,
3075 .bus_type = &hid_bus_type,
3076};
3077#endif
3078
3079static int __init hid_init(void)
3080{
3081 int ret;
3082
3083 ret = bus_register(&hid_bus_type);
3084 if (ret) {
3085 pr_err("can't register hid bus\n");
3086 goto err;
3087 }
3088
3089#ifdef CONFIG_HID_BPF
3090 hid_ops = &__hid_ops;
3091#endif
3092
3093 ret = hidraw_init();
3094 if (ret)
3095 goto err_bus;
3096
3097 hid_debug_init();
3098
3099 return 0;
3100err_bus:
3101 bus_unregister(&hid_bus_type);
3102err:
3103 return ret;
3104}
3105
3106static void __exit hid_exit(void)
3107{
3108#ifdef CONFIG_HID_BPF
3109 hid_ops = NULL;
3110#endif
3111 hid_debug_exit();
3112 hidraw_exit();
3113 bus_unregister(&hid_bus_type);
3114 hid_quirks_exit(HID_BUS_ANY);
3115}
3116
3117module_init(hid_init);
3118module_exit(hid_exit);
3119
3120MODULE_AUTHOR("Andreas Gal");
3121MODULE_AUTHOR("Vojtech Pavlik");
3122MODULE_AUTHOR("Jiri Kosina");
3123MODULE_DESCRIPTION("HID support for Linux");
3124MODULE_LICENSE("GPL");