   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  HID support for Linux
   4 *
   5 *  Copyright (c) 1999 Andreas Gal
   6 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
   7 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
   8 *  Copyright (c) 2006-2012 Jiri Kosina
   9 */
  10
  11/*
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/init.h>
  19#include <linux/kernel.h>
  20#include <linux/list.h>
  21#include <linux/mm.h>
  22#include <linux/spinlock.h>
  23#include <linux/unaligned.h>
  24#include <asm/byteorder.h>
  25#include <linux/input.h>
  26#include <linux/wait.h>
  27#include <linux/vmalloc.h>
  28#include <linux/sched.h>
  29#include <linux/semaphore.h>
  30
  31#include <linux/hid.h>
  32#include <linux/hiddev.h>
  33#include <linux/hid-debug.h>
  34#include <linux/hidraw.h>
  35
  36#include "hid-ids.h"
  37
  38/*
  39 * Version Information
  40 */
  41
  42#define DRIVER_DESC "HID core driver"
  43
  44static int hid_ignore_special_drivers = 0;
  45module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
  46MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
  47
  48/*
  49 * Convert a signed n-bit integer to signed 32-bit integer.
  50 */
  51
  52static s32 snto32(__u32 value, unsigned int n)
  53{
  54	if (!value || !n)
  55		return 0;
  56
  57	if (n > 32)
  58		n = 32;
  59
  60	return sign_extend32(value, n - 1);
  61}
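
/*
 * Example (illustrative): for a 4-bit two's complement field,
 *
 *	snto32(0x7, 4) ==  7
 *	snto32(0x8, 4) == -8
 *	snto32(0xF, 4) == -1
 *
 * i.e. bit (n - 1) is taken as the sign bit and extended to 32 bits.
 */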
  62
  63/*
  64 * Convert a signed 32-bit integer to a signed n-bit integer.
  65 */
  66
  67static u32 s32ton(__s32 value, unsigned int n)
  68{
  69	s32 a = value >> (n - 1);
  70
  71	if (a && a != -1)
  72		return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1;
  73	return value & ((1 << n) - 1);
  74}
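
/*
 * Example (illustrative): packing into a 4-bit field,
 *
 *	s32ton(-1, 4) == 0xF
 *	s32ton( 9, 4) == 0x7	(clamped to the largest positive 4-bit value)
 *	s32ton(-9, 4) == 0x8	(clamped to the most negative 4-bit value)
 */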
  75
  76/*
  77 * Register a new report for a device.
  78 */
  79
  80struct hid_report *hid_register_report(struct hid_device *device,
  81				       enum hid_report_type type, unsigned int id,
  82				       unsigned int application)
  83{
  84	struct hid_report_enum *report_enum = device->report_enum + type;
  85	struct hid_report *report;
  86
  87	if (id >= HID_MAX_IDS)
  88		return NULL;
  89	if (report_enum->report_id_hash[id])
  90		return report_enum->report_id_hash[id];
  91
  92	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
  93	if (!report)
  94		return NULL;
  95
  96	if (id != 0)
  97		report_enum->numbered = 1;
  98
  99	report->id = id;
 100	report->type = type;
 101	report->size = 0;
 102	report->device = device;
 103	report->application = application;
 104	report_enum->report_id_hash[id] = report;
 105
 106	list_add_tail(&report->list, &report_enum->report_list);
 107	INIT_LIST_HEAD(&report->field_entry_list);
 108
 109	return report;
 110}
 111EXPORT_SYMBOL_GPL(hid_register_report);
 112
 113/*
 114 * Register a new field for this report.
 115 */
 116
 117static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
 118{
 119	struct hid_field *field;
 120
 121	if (report->maxfield == HID_MAX_FIELDS) {
 122		hid_err(report->device, "too many fields in report\n");
 123		return NULL;
 124	}
 125
 126	field = kvzalloc((sizeof(struct hid_field) +
 127			  usages * sizeof(struct hid_usage) +
 128			  3 * usages * sizeof(unsigned int)), GFP_KERNEL);
 129	if (!field)
 130		return NULL;
 131
 132	field->index = report->maxfield++;
 133	report->field[field->index] = field;
 134	field->usage = (struct hid_usage *)(field + 1);
 135	field->value = (s32 *)(field->usage + usages);
 136	field->new_value = (s32 *)(field->value + usages);
 137	field->usages_priorities = (s32 *)(field->new_value + usages);
 138	field->report = report;
 139
 140	return field;
 141}
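
/*
 * Layout of the single kvzalloc() above (illustrative):
 *
 *	[ struct hid_field | usages x struct hid_usage | usages x value |
 *	  usages x new_value | usages x usages_priorities ]
 *
 * so kvfree()ing the field also releases all of its per-usage tables.
 */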
 142
 143/*
 144 * Open a collection. The type/usage is pushed on the stack.
 145 */
 146
 147static int open_collection(struct hid_parser *parser, unsigned type)
 148{
 149	struct hid_collection *collection;
 150	unsigned usage;
 151	int collection_index;
 152
 153	usage = parser->local.usage[0];
 154
 155	if (parser->collection_stack_ptr == parser->collection_stack_size) {
 156		unsigned int *collection_stack;
 157		unsigned int new_size = parser->collection_stack_size +
 158					HID_COLLECTION_STACK_SIZE;
 159
 160		collection_stack = krealloc(parser->collection_stack,
 161					    new_size * sizeof(unsigned int),
 162					    GFP_KERNEL);
 163		if (!collection_stack)
 164			return -ENOMEM;
 165
 166		parser->collection_stack = collection_stack;
 167		parser->collection_stack_size = new_size;
 168	}
 169
 170	if (parser->device->maxcollection == parser->device->collection_size) {
 171		collection = kmalloc(
 172				array3_size(sizeof(struct hid_collection),
 173					    parser->device->collection_size,
 174					    2),
 175				GFP_KERNEL);
 176		if (collection == NULL) {
 177			hid_err(parser->device, "failed to reallocate collection array\n");
 178			return -ENOMEM;
 179		}
 180		memcpy(collection, parser->device->collection,
 181			sizeof(struct hid_collection) *
 182			parser->device->collection_size);
 183		memset(collection + parser->device->collection_size, 0,
 184			sizeof(struct hid_collection) *
 185			parser->device->collection_size);
 186		kfree(parser->device->collection);
 187		parser->device->collection = collection;
 188		parser->device->collection_size *= 2;
 189	}
 190
 191	parser->collection_stack[parser->collection_stack_ptr++] =
 192		parser->device->maxcollection;
 193
 194	collection_index = parser->device->maxcollection++;
 195	collection = parser->device->collection + collection_index;
 196	collection->type = type;
 197	collection->usage = usage;
 198	collection->level = parser->collection_stack_ptr - 1;
 199	collection->parent_idx = (collection->level == 0) ? -1 :
 200		parser->collection_stack[collection->level - 1];
 201
 202	if (type == HID_COLLECTION_APPLICATION)
 203		parser->device->maxapplication++;
 204
 205	return 0;
 206}
 207
 208/*
 209 * Close a collection.
 210 */
 211
 212static int close_collection(struct hid_parser *parser)
 213{
 214	if (!parser->collection_stack_ptr) {
 215		hid_err(parser->device, "collection stack underflow\n");
 216		return -EINVAL;
 217	}
 218	parser->collection_stack_ptr--;
 219	return 0;
 220}
 221
 222/*
 223 * Climb up the stack, search for the specified collection type
 224 * and return the usage.
 225 */
 226
 227static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
 228{
 229	struct hid_collection *collection = parser->device->collection;
 230	int n;
 231
 232	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
 233		unsigned index = parser->collection_stack[n];
 234		if (collection[index].type == type)
 235			return collection[index].usage;
 236	}
 237	return 0; /* we know nothing about this usage type */
 238}
 239
 240/*
 241 * Concatenate usage which defines 16 bits or less with the
 242 * currently defined usage page to form a 32 bit usage
 243 */
 244
 245static void complete_usage(struct hid_parser *parser, unsigned int index)
 246{
 247	parser->local.usage[index] &= 0xFFFF;
 248	parser->local.usage[index] |=
 249		(parser->global.usage_page & 0xFFFF) << 16;
 250}
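
/*
 * Example (illustrative): with Usage Page 0x0001 (Generic Desktop) currently
 * defined and a 1-byte Usage item of 0x30 (X axis), the completed usage is
 *
 *	(0x0001 << 16) | 0x0030 == 0x00010030 == HID_GD_X
 */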
 251
 252/*
 253 * Add a usage to the temporary parser table.
 254 */
 255
 256static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
 257{
 258	if (parser->local.usage_index >= HID_MAX_USAGES) {
 259		hid_err(parser->device, "usage index exceeded\n");
 260		return -1;
 261	}
 262	parser->local.usage[parser->local.usage_index] = usage;
 263
 264	/*
 265	 * If Usage item only includes usage id, concatenate it with
 266	 * currently defined usage page
 267	 */
 268	if (size <= 2)
 269		complete_usage(parser, parser->local.usage_index);
 270
 271	parser->local.usage_size[parser->local.usage_index] = size;
 272	parser->local.collection_index[parser->local.usage_index] =
 273		parser->collection_stack_ptr ?
 274		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
 275	parser->local.usage_index++;
 276	return 0;
 277}
 278
 279/*
 280 * Register a new field for this report.
 281 */
 282
 283static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
 284{
 285	struct hid_report *report;
 286	struct hid_field *field;
 287	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
 288	unsigned int usages;
 289	unsigned int offset;
 290	unsigned int i;
 291	unsigned int application;
 292
 293	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
 294
 295	report = hid_register_report(parser->device, report_type,
 296				     parser->global.report_id, application);
 297	if (!report) {
 298		hid_err(parser->device, "hid_register_report failed\n");
 299		return -1;
 300	}
 301
 302	/* Handle both signed and unsigned cases properly */
 303	if ((parser->global.logical_minimum < 0 &&
 304		parser->global.logical_maximum <
 305		parser->global.logical_minimum) ||
 306		(parser->global.logical_minimum >= 0 &&
 307		(__u32)parser->global.logical_maximum <
 308		(__u32)parser->global.logical_minimum)) {
 309		dbg_hid("logical range invalid 0x%x 0x%x\n",
 310			parser->global.logical_minimum,
 311			parser->global.logical_maximum);
 312		return -1;
 313	}
 314
 315	offset = report->size;
 316	report->size += parser->global.report_size * parser->global.report_count;
 317
 318	if (parser->device->ll_driver->max_buffer_size)
 319		max_buffer_size = parser->device->ll_driver->max_buffer_size;
 320
 321	/* Total size check: Allow for possible report index byte */
 322	if (report->size > (max_buffer_size - 1) << 3) {
 323		hid_err(parser->device, "report is too long\n");
 324		return -1;
 325	}
 326
 327	if (!parser->local.usage_index) /* Ignore padding fields */
 328		return 0;
 329
 330	usages = max_t(unsigned, parser->local.usage_index,
 331				 parser->global.report_count);
 332
 333	field = hid_register_field(report, usages);
 334	if (!field)
 335		return 0;
 336
 337	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
 338	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
 339	field->application = application;
 340
 341	for (i = 0; i < usages; i++) {
 342		unsigned j = i;
 343		/* Duplicate the last usage we parsed if we have excess values */
 344		if (i >= parser->local.usage_index)
 345			j = parser->local.usage_index - 1;
 346		field->usage[i].hid = parser->local.usage[j];
 347		field->usage[i].collection_index =
 348			parser->local.collection_index[j];
 349		field->usage[i].usage_index = i;
 350		field->usage[i].resolution_multiplier = 1;
 351	}
 352
 353	field->maxusage = usages;
 354	field->flags = flags;
 355	field->report_offset = offset;
 356	field->report_type = report_type;
 357	field->report_size = parser->global.report_size;
 358	field->report_count = parser->global.report_count;
 359	field->logical_minimum = parser->global.logical_minimum;
 360	field->logical_maximum = parser->global.logical_maximum;
 361	field->physical_minimum = parser->global.physical_minimum;
 362	field->physical_maximum = parser->global.physical_maximum;
 363	field->unit_exponent = parser->global.unit_exponent;
 364	field->unit = parser->global.unit;
 365
 366	return 0;
 367}
 368
 369/*
 370 * Read data value from item.
 371 */
 372
 373static u32 item_udata(struct hid_item *item)
 374{
 375	switch (item->size) {
 376	case 1: return item->data.u8;
 377	case 2: return item->data.u16;
 378	case 4: return item->data.u32;
 379	}
 380	return 0;
 381}
 382
 383static s32 item_sdata(struct hid_item *item)
 384{
 385	switch (item->size) {
 386	case 1: return item->data.s8;
 387	case 2: return item->data.s16;
 388	case 4: return item->data.s32;
 389	}
 390	return 0;
 391}
 392
 393/*
 394 * Process a global item.
 395 */
 396
 397static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 398{
 399	__s32 raw_value;
 400	switch (item->tag) {
 401	case HID_GLOBAL_ITEM_TAG_PUSH:
 402
 403		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
 404			hid_err(parser->device, "global environment stack overflow\n");
 405			return -1;
 406		}
 407
 408		memcpy(parser->global_stack + parser->global_stack_ptr++,
 409			&parser->global, sizeof(struct hid_global));
 410		return 0;
 411
 412	case HID_GLOBAL_ITEM_TAG_POP:
 413
 414		if (!parser->global_stack_ptr) {
 415			hid_err(parser->device, "global environment stack underflow\n");
 416			return -1;
 417		}
 418
 419		memcpy(&parser->global, parser->global_stack +
 420			--parser->global_stack_ptr, sizeof(struct hid_global));
 421		return 0;
 422
 423	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
 424		parser->global.usage_page = item_udata(item);
 425		return 0;
 426
 427	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
 428		parser->global.logical_minimum = item_sdata(item);
 429		return 0;
 430
 431	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
 432		if (parser->global.logical_minimum < 0)
 433			parser->global.logical_maximum = item_sdata(item);
 434		else
 435			parser->global.logical_maximum = item_udata(item);
 436		return 0;
 437
 438	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
 439		parser->global.physical_minimum = item_sdata(item);
 440		return 0;
 441
 442	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
 443		if (parser->global.physical_minimum < 0)
 444			parser->global.physical_maximum = item_sdata(item);
 445		else
 446			parser->global.physical_maximum = item_udata(item);
 447		return 0;
 448
 449	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
 450		/* Many devices provide unit exponent as a two's complement
 451		 * nibble due to the common misunderstanding of HID
 452		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
 453		 * both this and the standard encoding. */
 454		raw_value = item_sdata(item);
 455		if (!(raw_value & 0xfffffff0))
 456			parser->global.unit_exponent = snto32(raw_value, 4);
 457		else
 458			parser->global.unit_exponent = raw_value;
 459		return 0;
 460
 461	case HID_GLOBAL_ITEM_TAG_UNIT:
 462		parser->global.unit = item_udata(item);
 463		return 0;
 464
 465	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
 466		parser->global.report_size = item_udata(item);
 467		if (parser->global.report_size > 256) {
 468			hid_err(parser->device, "invalid report_size %d\n",
 469					parser->global.report_size);
 470			return -1;
 471		}
 472		return 0;
 473
 474	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
 475		parser->global.report_count = item_udata(item);
 476		if (parser->global.report_count > HID_MAX_USAGES) {
 477			hid_err(parser->device, "invalid report_count %d\n",
 478					parser->global.report_count);
 479			return -1;
 480		}
 481		return 0;
 482
 483	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
 484		parser->global.report_id = item_udata(item);
 485		if (parser->global.report_id == 0 ||
 486		    parser->global.report_id >= HID_MAX_IDS) {
 487			hid_err(parser->device, "report_id %u is invalid\n",
 488				parser->global.report_id);
 489			return -1;
 490		}
 491		return 0;
 492
 493	default:
 494		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
 495		return -1;
 496	}
 497}
 498
 499/*
 500 * Process a local item.
 501 */
 502
 503static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 504{
 505	__u32 data;
 506	unsigned n;
 507	__u32 count;
 508
 509	data = item_udata(item);
 510
 511	switch (item->tag) {
 512	case HID_LOCAL_ITEM_TAG_DELIMITER:
 513
 514		if (data) {
 515			/*
 516			 * We treat items before the first delimiter
 517			 * as global to all usage sets (branch 0).
 518			 * For the moment we process only these global
 519			 * items and the first delimiter set.
 520			 */
 521			if (parser->local.delimiter_depth != 0) {
 522				hid_err(parser->device, "nested delimiters\n");
 523				return -1;
 524			}
 525			parser->local.delimiter_depth++;
 526			parser->local.delimiter_branch++;
 527		} else {
 528			if (parser->local.delimiter_depth < 1) {
 529				hid_err(parser->device, "bogus close delimiter\n");
 530				return -1;
 531			}
 532			parser->local.delimiter_depth--;
 533		}
 534		return 0;
 535
 536	case HID_LOCAL_ITEM_TAG_USAGE:
 537
 538		if (parser->local.delimiter_branch > 1) {
 539			dbg_hid("alternative usage ignored\n");
 540			return 0;
 541		}
 542
 543		return hid_add_usage(parser, data, item->size);
 544
 545	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
 546
 547		if (parser->local.delimiter_branch > 1) {
 548			dbg_hid("alternative usage ignored\n");
 549			return 0;
 550		}
 551
 552		parser->local.usage_minimum = data;
 553		return 0;
 554
 555	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
 556
 557		if (parser->local.delimiter_branch > 1) {
 558			dbg_hid("alternative usage ignored\n");
 559			return 0;
 560		}
 561
 562		count = data - parser->local.usage_minimum;
 563		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
 564			/*
 565			 * We do not warn if the name is not set, we are
 566			 * actually pre-scanning the device.
 567			 */
 568			if (dev_name(&parser->device->dev))
 569				hid_warn(parser->device,
 570					 "ignoring exceeding usage max\n");
 571			data = HID_MAX_USAGES - parser->local.usage_index +
 572				parser->local.usage_minimum - 1;
 573			if (data <= 0) {
 574				hid_err(parser->device,
 575					"no more usage index available\n");
 576				return -1;
 577			}
 578		}
 579
 580		for (n = parser->local.usage_minimum; n <= data; n++)
 581			if (hid_add_usage(parser, n, item->size)) {
 582				dbg_hid("hid_add_usage failed\n");
 583				return -1;
 584			}
 585		return 0;
 586
 587	default:
 588
 589		dbg_hid("unknown local item tag 0x%x\n", item->tag);
 590		return 0;
 591	}
 592	return 0;
 593}
 594
 595/*
 596 * Concatenate Usage Pages into Usages where relevant:
 597 * As per specification, 6.2.2.8: "When the parser encounters a main item it
 598 * concatenates the last declared Usage Page with a Usage to form a complete
 599 * usage value."
 600 */
 601
 602static void hid_concatenate_last_usage_page(struct hid_parser *parser)
 603{
 604	int i;
 605	unsigned int usage_page;
 606	unsigned int current_page;
 607
 608	if (!parser->local.usage_index)
 609		return;
 610
 611	usage_page = parser->global.usage_page;
 612
 613	/*
 614	 * Concatenate usage page again only if last declared Usage Page
 615	 * has not already been used in a previous usage concatenation
 616	 */
 617	for (i = parser->local.usage_index - 1; i >= 0; i--) {
 618		if (parser->local.usage_size[i] > 2)
 619			/* Ignore extended usages */
 620			continue;
 621
 622		current_page = parser->local.usage[i] >> 16;
 623		if (current_page == usage_page)
 624			break;
 625
 626		complete_usage(parser, i);
 627	}
 628}
 629
 630/*
 631 * Process a main item.
 632 */
 633
 634static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
 635{
 636	__u32 data;
 637	int ret;
 638
 639	hid_concatenate_last_usage_page(parser);
 640
 641	data = item_udata(item);
 642
 643	switch (item->tag) {
 644	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
 645		ret = open_collection(parser, data & 0xff);
 646		break;
 647	case HID_MAIN_ITEM_TAG_END_COLLECTION:
 648		ret = close_collection(parser);
 649		break;
 650	case HID_MAIN_ITEM_TAG_INPUT:
 651		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
 652		break;
 653	case HID_MAIN_ITEM_TAG_OUTPUT:
 654		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
 655		break;
 656	case HID_MAIN_ITEM_TAG_FEATURE:
 657		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
 658		break;
 659	default:
 660		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
 661		ret = 0;
 662	}
 663
 664	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */
 665
 666	return ret;
 667}
 668
 669/*
 670 * Process a reserved item.
 671 */
 672
 673static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
 674{
 675	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
 676	return 0;
 677}
 678
 679/*
 680 * Free a report and all registered fields. The field->usage and
 681 * field->value tables are allocated behind the field, so we need
 682 * only to free(field) itself.
 683 */
 684
 685static void hid_free_report(struct hid_report *report)
 686{
 687	unsigned n;
 688
 689	kfree(report->field_entries);
 690
 691	for (n = 0; n < report->maxfield; n++)
 692		kvfree(report->field[n]);
 693	kfree(report);
 694}
 695
 696/*
 697 * Close report. This function returns the device
 698 * state to the point prior to hid_open_report().
 699 */
 700static void hid_close_report(struct hid_device *device)
 701{
 702	unsigned i, j;
 703
 704	for (i = 0; i < HID_REPORT_TYPES; i++) {
 705		struct hid_report_enum *report_enum = device->report_enum + i;
 706
 707		for (j = 0; j < HID_MAX_IDS; j++) {
 708			struct hid_report *report = report_enum->report_id_hash[j];
 709			if (report)
 710				hid_free_report(report);
 711		}
 712		memset(report_enum, 0, sizeof(*report_enum));
 713		INIT_LIST_HEAD(&report_enum->report_list);
 714	}
 715
 716	/*
 717	 * If the HID driver had a report_fixup() callback, dev->rdesc
 718	 * will be allocated by hid-core and needs to be freed.
 719	 * Otherwise, it is either equal to dev_rdesc or bpf_rdesc, in
 720	 * which cases it'll be freed later on device removal or destroy.
 721	 */
 722	if (device->rdesc != device->dev_rdesc && device->rdesc != device->bpf_rdesc)
 723		kfree(device->rdesc);
 724	device->rdesc = NULL;
 725	device->rsize = 0;
 726
 727	kfree(device->collection);
 728	device->collection = NULL;
 729	device->collection_size = 0;
 730	device->maxcollection = 0;
 731	device->maxapplication = 0;
 732
 733	device->status &= ~HID_STAT_PARSED;
 734}
 735
 736static inline void hid_free_bpf_rdesc(struct hid_device *hdev)
 737{
 738	/* bpf_rdesc is either equal to dev_rdesc or allocated by call_hid_bpf_rdesc_fixup() */
 739	if (hdev->bpf_rdesc != hdev->dev_rdesc)
 740		kfree(hdev->bpf_rdesc);
 741	hdev->bpf_rdesc = NULL;
 742}
 743
 744/*
 745 * Free a device structure, all reports, and all fields.
 746 */
 747
 748void hiddev_free(struct kref *ref)
 749{
 750	struct hid_device *hid = container_of(ref, struct hid_device, ref);
 751
 752	hid_close_report(hid);
 753	hid_free_bpf_rdesc(hid);
 754	kfree(hid->dev_rdesc);
 755	kfree(hid);
 756}
 757
 758static void hid_device_release(struct device *dev)
 759{
 760	struct hid_device *hid = to_hid_device(dev);
 761
 762	kref_put(&hid->ref, hiddev_free);
 763}
 764
 765/*
 766 * Fetch a report description item from the data stream. We support long
 767 * items, though they are not used yet.
 768 */
 769
 770static const u8 *fetch_item(const __u8 *start, const __u8 *end, struct hid_item *item)
 771{
 772	u8 b;
 773
 774	if ((end - start) <= 0)
 775		return NULL;
 776
 777	b = *start++;
 778
 779	item->type = (b >> 2) & 3;
 780	item->tag  = (b >> 4) & 15;
 781
 782	if (item->tag == HID_ITEM_TAG_LONG) {
 783
 784		item->format = HID_ITEM_FORMAT_LONG;
 785
 786		if ((end - start) < 2)
 787			return NULL;
 788
 789		item->size = *start++;
 790		item->tag  = *start++;
 791
 792		if ((end - start) < item->size)
 793			return NULL;
 794
 795		item->data.longdata = start;
 796		start += item->size;
 797		return start;
 798	}
 799
 800	item->format = HID_ITEM_FORMAT_SHORT;
 801	item->size = BIT(b & 3) >> 1; /* 0, 1, 2, 3 -> 0, 1, 2, 4 */
 802
 803	if (end - start < item->size)
 804		return NULL;
 805
 806	switch (item->size) {
 807	case 0:
 808		break;
 809
 810	case 1:
 811		item->data.u8 = *start;
 812		break;
 813
 814	case 2:
 815		item->data.u16 = get_unaligned_le16(start);
 816		break;
 817
 818	case 4:
 819		item->data.u32 = get_unaligned_le32(start);
 820		break;
 821	}
 822
 823	return start + item->size;
 824}
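
/*
 * Example (illustrative): the short item bytes 0x05 0x01 decode as
 *
 *	size = BIT(0x05 & 3) >> 1 = 1 byte of data
 *	type = (0x05 >> 2) & 3   = 1 (global item)
 *	tag  = (0x05 >> 4) & 15  = 0 (Usage Page)
 *	data = 0x01              (Generic Desktop)
 *
 * i.e. "Usage Page (Generic Desktop)".
 */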
 825
 826static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
 827{
 828	struct hid_device *hid = parser->device;
 829
 830	if (usage == HID_DG_CONTACTID)
 831		hid->group = HID_GROUP_MULTITOUCH;
 832}
 833
 834static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
 835{
 836	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
 837	    parser->global.report_size == 8)
 838		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
 839
 840	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
 841	    parser->global.report_size == 8)
 842		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
 843}
 844
 845static void hid_scan_collection(struct hid_parser *parser, unsigned type)
 846{
 847	struct hid_device *hid = parser->device;
 848	int i;
 849
 850	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
 851	    (type == HID_COLLECTION_PHYSICAL ||
 852	     type == HID_COLLECTION_APPLICATION))
 853		hid->group = HID_GROUP_SENSOR_HUB;
 854
 855	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
 856	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
 857	    hid->group == HID_GROUP_MULTITOUCH)
 858		hid->group = HID_GROUP_GENERIC;
 859
 860	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
 861		for (i = 0; i < parser->local.usage_index; i++)
 862			if (parser->local.usage[i] == HID_GD_POINTER)
 863				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
 864
 865	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
 866		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
 867
 868	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
 869		for (i = 0; i < parser->local.usage_index; i++)
 870			if (parser->local.usage[i] ==
 871					(HID_UP_GOOGLEVENDOR | 0x0001))
 872				parser->device->group =
 873					HID_GROUP_VIVALDI;
 874}
 875
 876static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
 877{
 878	__u32 data;
 879	int i;
 880
 881	hid_concatenate_last_usage_page(parser);
 882
 883	data = item_udata(item);
 884
 885	switch (item->tag) {
 886	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
 887		hid_scan_collection(parser, data & 0xff);
 888		break;
 889	case HID_MAIN_ITEM_TAG_END_COLLECTION:
 890		break;
 891	case HID_MAIN_ITEM_TAG_INPUT:
 892		/* ignore constant inputs, they will be ignored by hid-input */
 893		if (data & HID_MAIN_ITEM_CONSTANT)
 894			break;
 895		for (i = 0; i < parser->local.usage_index; i++)
 896			hid_scan_input_usage(parser, parser->local.usage[i]);
 897		break;
 898	case HID_MAIN_ITEM_TAG_OUTPUT:
 899		break;
 900	case HID_MAIN_ITEM_TAG_FEATURE:
 901		for (i = 0; i < parser->local.usage_index; i++)
 902			hid_scan_feature_usage(parser, parser->local.usage[i]);
 903		break;
 904	}
 905
 906	/* Reset the local parser environment */
 907	memset(&parser->local, 0, sizeof(parser->local));
 908
 909	return 0;
 910}
 911
 912/*
 913 * Scan a report descriptor before the device is added to the bus.
 914 * Sets device groups and other properties that determine what driver
 915 * to load.
 916 */
 917static int hid_scan_report(struct hid_device *hid)
 918{
 919	struct hid_parser *parser;
 920	struct hid_item item;
 921	const __u8 *start = hid->dev_rdesc;
 922	const __u8 *end = start + hid->dev_rsize;
 923	static int (*dispatch_type[])(struct hid_parser *parser,
 924				      struct hid_item *item) = {
 925		hid_scan_main,
 926		hid_parser_global,
 927		hid_parser_local,
 928		hid_parser_reserved
 929	};
 930
 931	parser = vzalloc(sizeof(struct hid_parser));
 932	if (!parser)
 933		return -ENOMEM;
 934
 935	parser->device = hid;
 936	hid->group = HID_GROUP_GENERIC;
 937
 938	/*
 939	 * The parsing is simpler than the one in hid_open_report() as we should
 940	 * be robust against hid errors. Those errors will be raised by
 941	 * hid_open_report() anyway.
 942	 */
 943	while ((start = fetch_item(start, end, &item)) != NULL)
 944		dispatch_type[item.type](parser, &item);
 945
 946	/*
 947	 * Handle special flags set during scanning.
 948	 */
 949	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
 950	    (hid->group == HID_GROUP_MULTITOUCH))
 951		hid->group = HID_GROUP_MULTITOUCH_WIN_8;
 952
 953	/*
 954	 * Vendor specific handlings
 955	 */
 956	switch (hid->vendor) {
 957	case USB_VENDOR_ID_WACOM:
 958		hid->group = HID_GROUP_WACOM;
 959		break;
 960	case USB_VENDOR_ID_SYNAPTICS:
 961		if (hid->group == HID_GROUP_GENERIC)
 962			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
 963			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
 964				/*
 965				 * hid-rmi should take care of them,
 966				 * not hid-generic
 967				 */
 968				hid->group = HID_GROUP_RMI;
 969		break;
 970	}
 971
 972	kfree(parser->collection_stack);
 973	vfree(parser);
 974	return 0;
 975}
 976
 977/**
 978 * hid_parse_report - parse device report
 979 *
 980 * @hid: hid device
 981 * @start: report start
 982 * @size: report size
 983 *
 984 * Allocate the device report as read by the bus driver. This function should
 985 * only be called from parse() in ll drivers.
 986 */
 987int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size)
 988{
 989	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
 990	if (!hid->dev_rdesc)
 991		return -ENOMEM;
 992	hid->dev_rsize = size;
 993	return 0;
 994}
 995EXPORT_SYMBOL_GPL(hid_parse_report);
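
/*
 * Minimal sketch of a ->parse() callback in a low-level transport driver
 * (the example_dev structure and its rdesc/rdesc_len members are
 * hypothetical): the transport only stashes the raw bytes here; the real
 * parsing happens later in hid_open_report(), reached via hid_parse().
 *
 *	static int example_ll_parse(struct hid_device *hid)
 *	{
 *		struct example_dev *edev = hid->driver_data;
 *
 *		return hid_parse_report(hid, edev->rdesc, edev->rdesc_len);
 *	}
 */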
 996
 997static const char * const hid_report_names[] = {
 998	"HID_INPUT_REPORT",
 999	"HID_OUTPUT_REPORT",
1000	"HID_FEATURE_REPORT",
1001};
1002/**
1003 * hid_validate_values - validate existing device report's value indexes
1004 *
1005 * @hid: hid device
1006 * @type: which report type to examine
1007 * @id: which report ID to examine (0 for first)
1008 * @field_index: which report field to examine
1009 * @report_counts: expected number of values
1010 *
1011 * Validate the number of values in a given field of a given report, after
1012 * parsing.
1013 */
1014struct hid_report *hid_validate_values(struct hid_device *hid,
1015				       enum hid_report_type type, unsigned int id,
1016				       unsigned int field_index,
1017				       unsigned int report_counts)
1018{
1019	struct hid_report *report;
1020
1021	if (type > HID_FEATURE_REPORT) {
1022		hid_err(hid, "invalid HID report type %u\n", type);
1023		return NULL;
1024	}
1025
1026	if (id >= HID_MAX_IDS) {
1027		hid_err(hid, "invalid HID report id %u\n", id);
1028		return NULL;
1029	}
1030
1031	/*
1032	 * Explicitly not using hid_get_report() here since it depends on
1033	 * ->numbered being checked, which may not always be the case when
1034	 * drivers go to access report values.
1035	 */
1036	if (id == 0) {
1037		/*
1038		 * Validating on id 0 means we should examine the first
1039		 * report in the list.
1040		 */
1041		report = list_first_entry_or_null(
1042				&hid->report_enum[type].report_list,
1043				struct hid_report, list);
1044	} else {
1045		report = hid->report_enum[type].report_id_hash[id];
1046	}
1047	if (!report) {
1048		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1049		return NULL;
1050	}
1051	if (report->maxfield <= field_index) {
1052		hid_err(hid, "not enough fields in %s %u\n",
1053			hid_report_names[type], id);
1054		return NULL;
1055	}
1056	if (report->field[field_index]->report_count < report_counts) {
1057		hid_err(hid, "not enough values in %s %u field %u\n",
1058			hid_report_names[type], id, field_index);
1059		return NULL;
1060	}
1061	return report;
1062}
1063EXPORT_SYMBOL_GPL(hid_validate_values);
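
/*
 * Illustrative use, as force-feedback style drivers typically do before
 * touching report values directly (the counts here are made up):
 *
 *	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 7);
 *	if (!report)
 *		return -ENODEV;
 *
 * after which report->field[0]->value[0..6] are known to exist.
 */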
1064
1065static int hid_calculate_multiplier(struct hid_device *hid,
1066				     struct hid_field *multiplier)
1067{
1068	int m;
1069	__s32 v = *multiplier->value;
1070	__s32 lmin = multiplier->logical_minimum;
1071	__s32 lmax = multiplier->logical_maximum;
1072	__s32 pmin = multiplier->physical_minimum;
1073	__s32 pmax = multiplier->physical_maximum;
1074
1075	/*
1076	 * "Because OS implementations will generally divide the control's
1077	 * reported count by the Effective Resolution Multiplier, designers
1078	 * should take care not to establish a potential Effective
1079	 * Resolution Multiplier of zero."
1080	 * HID Usage Table, v1.12, Section 4.3.1, p31
1081	 */
1082	if (lmax - lmin == 0)
1083		return 1;
1084	/*
1085	 * Handling the unit exponent is left as an exercise to whoever
1086	 * finds a device where that exponent is not 0.
1087	 */
1088	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1089	if (unlikely(multiplier->unit_exponent != 0)) {
1090		hid_warn(hid,
1091			 "unsupported Resolution Multiplier unit exponent %d\n",
1092			 multiplier->unit_exponent);
1093	}
1094
1095	/* There are no devices with an effective multiplier > 255 */
1096	if (unlikely(m == 0 || m > 255 || m < -255)) {
1097		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1098		m = 1;
1099	}
1100
1101	return m;
1102}
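
/*
 * Worked example (illustrative values): with logical min/max 0/1,
 * physical min/max 1/8 and the multiplier feature value set to 1,
 * the formula above gives
 *
 *	m = (1 - 0) / (1 - 0) * (8 - 1) + 1 = 8
 *
 * i.e. the hardware reports 8 units per physical detent.
 */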
1103
1104static void hid_apply_multiplier_to_field(struct hid_device *hid,
1105					  struct hid_field *field,
1106					  struct hid_collection *multiplier_collection,
1107					  int effective_multiplier)
1108{
1109	struct hid_collection *collection;
1110	struct hid_usage *usage;
1111	int i;
1112
1113	/*
1114	 * If multiplier_collection is NULL, the multiplier applies
1115	 * to all fields in the report.
1116	 * Otherwise, it is the Logical Collection the multiplier applies to
1117	 * but our field may be in a subcollection of that collection.
1118	 */
1119	for (i = 0; i < field->maxusage; i++) {
1120		usage = &field->usage[i];
1121
1122		collection = &hid->collection[usage->collection_index];
1123		while (collection->parent_idx != -1 &&
1124		       collection != multiplier_collection)
1125			collection = &hid->collection[collection->parent_idx];
1126
1127		if (collection->parent_idx != -1 ||
1128		    multiplier_collection == NULL)
1129			usage->resolution_multiplier = effective_multiplier;
1130
1131	}
1132}
1133
1134static void hid_apply_multiplier(struct hid_device *hid,
1135				 struct hid_field *multiplier)
1136{
1137	struct hid_report_enum *rep_enum;
1138	struct hid_report *rep;
1139	struct hid_field *field;
1140	struct hid_collection *multiplier_collection;
1141	int effective_multiplier;
1142	int i;
1143
1144	/*
1145	 * "The Resolution Multiplier control must be contained in the same
1146	 * Logical Collection as the control(s) to which it is to be applied.
1147	 * If no Resolution Multiplier is defined, then the Resolution
1148	 * Multiplier defaults to 1.  If more than one control exists in a
1149	 * Logical Collection, the Resolution Multiplier is associated with
1150	 * all controls in the collection. If no Logical Collection is
1151	 * defined, the Resolution Multiplier is associated with all
1152	 * controls in the report."
1153	 * HID Usage Table, v1.12, Section 4.3.1, p30
1154	 *
1155	 * Thus, search from the current collection upwards until we find a
1156	 * logical collection. Then search all fields for that same parent
1157	 * collection. Those are the fields the multiplier applies to.
1158	 *
1159	 * If we have more than one multiplier, it will overwrite the
1160	 * applicable fields later.
1161	 */
1162	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1163	while (multiplier_collection->parent_idx != -1 &&
1164	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
1165		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1166	if (multiplier_collection->type != HID_COLLECTION_LOGICAL)
1167		multiplier_collection = NULL;
1168
1169	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1170
1171	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1172	list_for_each_entry(rep, &rep_enum->report_list, list) {
1173		for (i = 0; i < rep->maxfield; i++) {
1174			field = rep->field[i];
1175			hid_apply_multiplier_to_field(hid, field,
1176						      multiplier_collection,
1177						      effective_multiplier);
1178		}
1179	}
1180}
1181
1182/*
1183 * hid_setup_resolution_multiplier - set up all resolution multipliers
1184 *
1185 * @device: hid device
1186 *
1187 * Search for all Resolution Multiplier Feature Reports and apply their
1188 * value to all matching Input items. This only updates the internal struct
1189 * fields.
1190 *
1191 * The Resolution Multiplier is applied by the hardware. If the multiplier
1192 * is anything other than 1, the hardware will send pre-multiplied events
1193 * so that the same physical interaction generates an accumulated
1194 *	accumulated_value = value * multiplier
1195 * This may be achieved by sending
1196 * - "value * multiplier" for each event, or
1197 * - "value" but "multiplier" times as frequently, or
1198 * - a combination of the above
1199 * The only guarantee is that the same physical interaction always generates
1200 * an accumulated 'value * multiplier'.
1201 *
1202 * This function must be called before any event processing and after
1203 * any SetRequest to the Resolution Multiplier.
1204 */
1205void hid_setup_resolution_multiplier(struct hid_device *hid)
1206{
1207	struct hid_report_enum *rep_enum;
1208	struct hid_report *rep;
1209	struct hid_usage *usage;
1210	int i, j;
1211
1212	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1213	list_for_each_entry(rep, &rep_enum->report_list, list) {
1214		for (i = 0; i < rep->maxfield; i++) {
1215			/* Ignore if report count is out of bounds. */
1216			if (rep->field[i]->report_count < 1)
1217				continue;
1218
1219			for (j = 0; j < rep->field[i]->maxusage; j++) {
1220				usage = &rep->field[i]->usage[j];
1221				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1222					hid_apply_multiplier(hid,
1223							     rep->field[i]);
1224			}
1225		}
1226	}
1227}
1228EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1229
1230/**
1231 * hid_open_report - open a driver-specific device report
1232 *
1233 * @device: hid device
1234 *
1235 * Parse a report description into a hid_device structure. Reports are
1236 * enumerated, fields are attached to these reports.
1237 * 0 returned on success, otherwise nonzero error value.
1238 *
1239 * This function (or the equivalent hid_parse() macro) should only be
1240 * called from probe() in drivers, before starting the device.
1241 */
1242int hid_open_report(struct hid_device *device)
1243{
1244	struct hid_parser *parser;
1245	struct hid_item item;
1246	unsigned int size;
1247	const __u8 *start;
1248	const __u8 *end;
1249	const __u8 *next;
1250	int ret;
1251	int i;
1252	static int (*dispatch_type[])(struct hid_parser *parser,
1253				      struct hid_item *item) = {
1254		hid_parser_main,
1255		hid_parser_global,
1256		hid_parser_local,
1257		hid_parser_reserved
1258	};
1259
1260	if (WARN_ON(device->status & HID_STAT_PARSED))
1261		return -EBUSY;
1262
1263	start = device->bpf_rdesc;
1264	if (WARN_ON(!start))
1265		return -ENODEV;
1266	size = device->bpf_rsize;
1267
1268	if (device->driver->report_fixup) {
1269		/*
1270		 * device->driver->report_fixup() needs to work
1271		 * on a copy of our report descriptor so it can
1272		 * change it.
1273		 */
1274		__u8 *buf = kmemdup(start, size, GFP_KERNEL);
1275
1276		if (buf == NULL)
1277			return -ENOMEM;
1278
1279		start = device->driver->report_fixup(device, buf, &size);
1280
1281		/*
1282		 * The second kmemdup is required in case report_fixup() returns
1283		 * a static read-only memory, but we have no idea if that memory
1284		 * needs to be cleaned up or not at the end.
1285		 */
1286		start = kmemdup(start, size, GFP_KERNEL);
1287		kfree(buf);
1288		if (start == NULL)
1289			return -ENOMEM;
1290	}
1291
1292	device->rdesc = start;
1293	device->rsize = size;
1294
1295	parser = vzalloc(sizeof(struct hid_parser));
1296	if (!parser) {
1297		ret = -ENOMEM;
1298		goto alloc_err;
1299	}
1300
1301	parser->device = device;
1302
1303	end = start + size;
1304
1305	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1306				     sizeof(struct hid_collection), GFP_KERNEL);
1307	if (!device->collection) {
1308		ret = -ENOMEM;
1309		goto err;
1310	}
1311	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1312	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1313		device->collection[i].parent_idx = -1;
1314
1315	ret = -EINVAL;
1316	while ((next = fetch_item(start, end, &item)) != NULL) {
1317		start = next;
1318
1319		if (item.format != HID_ITEM_FORMAT_SHORT) {
1320			hid_err(device, "unexpected long global item\n");
1321			goto err;
1322		}
1323
1324		if (dispatch_type[item.type](parser, &item)) {
1325			hid_err(device, "item %u %u %u %u parsing failed\n",
1326				item.format, (unsigned)item.size,
1327				(unsigned)item.type, (unsigned)item.tag);
1328			goto err;
1329		}
1330
1331		if (start == end) {
1332			if (parser->collection_stack_ptr) {
1333				hid_err(device, "unbalanced collection at end of report description\n");
1334				goto err;
1335			}
1336			if (parser->local.delimiter_depth) {
1337				hid_err(device, "unbalanced delimiter at end of report description\n");
1338				goto err;
1339			}
1340
1341			/*
1342			 * fetch initial values in case the device's
1343			 * default multiplier isn't the recommended 1
1344			 */
1345			hid_setup_resolution_multiplier(device);
1346
1347			kfree(parser->collection_stack);
1348			vfree(parser);
1349			device->status |= HID_STAT_PARSED;
1350
1351			return 0;
1352		}
1353	}
1354
1355	hid_err(device, "item fetching failed at offset %u/%u\n",
1356		size - (unsigned int)(end - start), size);
1357err:
1358	kfree(parser->collection_stack);
1359alloc_err:
1360	vfree(parser);
1361	hid_close_report(device);
1362	return ret;
1363}
1364EXPORT_SYMBOL_GPL(hid_open_report);
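
/*
 * Minimal sketch of the intended call site, a hid driver's probe();
 * function and variable names are illustrative only, and hid_parse() is
 * the thin wrapper around hid_open_report():
 *
 *	static int example_probe(struct hid_device *hdev,
 *				 const struct hid_device_id *id)
 *	{
 *		int ret;
 *
 *		ret = hid_parse(hdev);
 *		if (ret)
 *			return ret;
 *
 *		return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
 *	}
 */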
1365
1366/*
1367 * Extract/implement a data field from/to a little endian report (bit array).
1368 *
1369 * Code sort-of follows HID spec:
1370 *     http://www.usb.org/developers/hidpage/HID1_11.pdf
1371 *
1372 * While the USB HID spec allows unlimited length bit fields in "report
1373 * descriptors", most devices never use more than 16 bits.
1374 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1375 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1376 */
1377
1378static u32 __extract(u8 *report, unsigned offset, int n)
1379{
1380	unsigned int idx = offset / 8;
1381	unsigned int bit_nr = 0;
1382	unsigned int bit_shift = offset % 8;
1383	int bits_to_copy = 8 - bit_shift;
1384	u32 value = 0;
1385	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1386
1387	while (n > 0) {
1388		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1389		n -= bits_to_copy;
1390		bit_nr += bits_to_copy;
1391		bits_to_copy = 8;
1392		bit_shift = 0;
1393		idx++;
1394	}
1395
1396	return value & mask;
1397}
1398
1399u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1400			unsigned offset, unsigned n)
1401{
1402	if (n > 32) {
1403		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1404			      __func__, n, current->comm);
1405		n = 32;
1406	}
1407
1408	return __extract(report, offset, n);
1409}
1410EXPORT_SYMBOL_GPL(hid_field_extract);
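
/*
 * Worked example (illustrative): for report bytes { 0x34, 0x12 },
 *
 *	hid_field_extract(hid, report, 0, 8)  == 0x34
 *	hid_field_extract(hid, report, 4, 8)  == 0x23
 *	hid_field_extract(hid, report, 0, 16) == 0x1234
 *
 * i.e. fields are read least significant bit first from the little endian
 * bit stream.
 */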
1411
1412/*
1413 * "implement" : set bits in a little endian bit stream.
1414 * Same concepts as "extract" (see comments above).
1415 * The data mangled in the bit stream remains in little endian
1416 * order the whole time. It makes more sense to talk about
1417 * endianness of register values by considering a register
1418 * a "cached" copy of the little endian bit stream.
1419 */
1420
1421static void __implement(u8 *report, unsigned offset, int n, u32 value)
1422{
1423	unsigned int idx = offset / 8;
1424	unsigned int bit_shift = offset % 8;
1425	int bits_to_set = 8 - bit_shift;
1426
1427	while (n - bits_to_set >= 0) {
1428		report[idx] &= ~(0xff << bit_shift);
1429		report[idx] |= value << bit_shift;
1430		value >>= bits_to_set;
1431		n -= bits_to_set;
1432		bits_to_set = 8;
1433		bit_shift = 0;
1434		idx++;
1435	}
1436
1437	/* last nibble */
1438	if (n) {
1439		u8 bit_mask = ((1U << n) - 1);
1440		report[idx] &= ~(bit_mask << bit_shift);
1441		report[idx] |= value << bit_shift;
1442	}
1443}
1444
1445static void implement(const struct hid_device *hid, u8 *report,
1446		      unsigned offset, unsigned n, u32 value)
1447{
1448	if (unlikely(n > 32)) {
1449		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1450			 __func__, n, current->comm);
1451		n = 32;
1452	} else if (n < 32) {
1453		u32 m = (1U << n) - 1;
1454
1455		if (unlikely(value > m)) {
1456			hid_warn(hid,
1457				 "%s() called with too large value %d (n: %d)! (%s)\n",
1458				 __func__, value, n, current->comm);
1459			value &= m;
1460		}
1461	}
1462
1463	__implement(report, offset, n, value);
1464}
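
/*
 * Mirror of the extract example above (illustrative): starting from a
 * zeroed two-byte buffer,
 *
 *	implement(hid, report, 0, 8, 0x34);
 *	implement(hid, report, 8, 8, 0x12);
 *
 * leaves { 0x34, 0x12 } in the buffer, i.e. 0x1234 stored little endian.
 */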
1465
1466/*
1467 * Search an array for a value.
1468 */
1469
1470static int search(__s32 *array, __s32 value, unsigned n)
1471{
1472	while (n--) {
1473		if (*array++ == value)
1474			return 0;
1475	}
1476	return -1;
1477}
1478
1479/**
1480 * hid_match_report - check if driver's raw_event should be called
1481 *
1482 * @hid: hid device
1483 * @report: hid report to match against
1484 *
1485 * compare hid->driver->report_table->report_type to report->type
1486 */
1487static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1488{
1489	const struct hid_report_id *id = hid->driver->report_table;
1490
1491	if (!id) /* NULL means all */
1492		return 1;
1493
1494	for (; id->report_type != HID_TERMINATOR; id++)
1495		if (id->report_type == HID_ANY_ID ||
1496				id->report_type == report->type)
1497			return 1;
1498	return 0;
1499}
1500
1501/**
1502 * hid_match_usage - check if driver's event should be called
1503 *
1504 * @hid: hid device
1505 * @usage: usage to match against
1506 *
1507 * compare hid->driver->usage_table->usage_{type,code} to
1508 * usage->usage_{type,code}
1509 */
1510static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1511{
1512	const struct hid_usage_id *id = hid->driver->usage_table;
1513
1514	if (!id) /* NULL means all */
1515		return 1;
1516
1517	for (; id->usage_type != HID_ANY_ID - 1; id++)
1518		if ((id->usage_hid == HID_ANY_ID ||
1519				id->usage_hid == usage->hid) &&
1520				(id->usage_type == HID_ANY_ID ||
1521				id->usage_type == usage->type) &&
1522				(id->usage_code == HID_ANY_ID ||
1523				 id->usage_code == usage->code))
1524			return 1;
1525	return 0;
1526}
1527
1528static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1529		struct hid_usage *usage, __s32 value, int interrupt)
1530{
1531	struct hid_driver *hdrv = hid->driver;
1532	int ret;
1533
1534	if (!list_empty(&hid->debug_list))
1535		hid_dump_input(hid, usage, value);
1536
1537	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1538		ret = hdrv->event(hid, field, usage, value);
1539		if (ret != 0) {
1540			if (ret < 0)
1541				hid_err(hid, "%s's event failed with %d\n",
1542						hdrv->name, ret);
1543			return;
1544		}
1545	}
1546
1547	if (hid->claimed & HID_CLAIMED_INPUT)
1548		hidinput_hid_event(hid, field, usage, value);
1549	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1550		hid->hiddev_hid_event(hid, field, usage, value);
1551}
1552
1553/*
1554 * Checks if the given value is valid within this field
1555 */
1556static inline int hid_array_value_is_valid(struct hid_field *field,
1557					   __s32 value)
1558{
1559	__s32 min = field->logical_minimum;
1560
1561	/*
1562	 * Value needs to be between logical min and max, and
1563	 * (value - min) is used as an index in the usage array.
1564	 * This array is of size field->maxusage
1565	 */
1566	return value >= min &&
1567	       value <= field->logical_maximum &&
1568	       value - min < field->maxusage;
1569}
1570
1571/*
1572 * Fetch the field from the data. The field content is stored for next
1573 * report processing (we do differential reporting to the layer).
1574 */
1575static void hid_input_fetch_field(struct hid_device *hid,
1576				  struct hid_field *field,
1577				  __u8 *data)
1578{
1579	unsigned n;
1580	unsigned count = field->report_count;
1581	unsigned offset = field->report_offset;
1582	unsigned size = field->report_size;
1583	__s32 min = field->logical_minimum;
1584	__s32 *value;
1585
1586	value = field->new_value;
1587	memset(value, 0, count * sizeof(__s32));
1588	field->ignored = false;
1589
1590	for (n = 0; n < count; n++) {
1591
1592		value[n] = min < 0 ?
1593			snto32(hid_field_extract(hid, data, offset + n * size,
1594			       size), size) :
1595			hid_field_extract(hid, data, offset + n * size, size);
1596
1597		/* Ignore report if ErrorRollOver */
1598		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1599		    hid_array_value_is_valid(field, value[n]) &&
1600		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1601			field->ignored = true;
1602			return;
1603		}
1604	}
1605}
1606
1607/*
1608 * Process a received variable field.
1609 */
1610
1611static void hid_input_var_field(struct hid_device *hid,
1612				struct hid_field *field,
1613				int interrupt)
1614{
1615	unsigned int count = field->report_count;
1616	__s32 *value = field->new_value;
1617	unsigned int n;
1618
1619	for (n = 0; n < count; n++)
1620		hid_process_event(hid,
1621				  field,
1622				  &field->usage[n],
1623				  value[n],
1624				  interrupt);
1625
1626	memcpy(field->value, value, count * sizeof(__s32));
1627}
1628
1629/*
1630 * Process a received array field. The field content is stored for
1631 * next report processing (we do differential reporting to the layer).
1632 */
1633
1634static void hid_input_array_field(struct hid_device *hid,
1635				  struct hid_field *field,
1636				  int interrupt)
1637{
1638	unsigned int n;
1639	unsigned int count = field->report_count;
1640	__s32 min = field->logical_minimum;
1641	__s32 *value;
1642
1643	value = field->new_value;
1644
1645	/* ErrorRollOver */
1646	if (field->ignored)
1647		return;
1648
1649	for (n = 0; n < count; n++) {
1650		if (hid_array_value_is_valid(field, field->value[n]) &&
1651		    search(value, field->value[n], count))
1652			hid_process_event(hid,
1653					  field,
1654					  &field->usage[field->value[n] - min],
1655					  0,
1656					  interrupt);
1657
1658		if (hid_array_value_is_valid(field, value[n]) &&
1659		    search(field->value, value[n], count))
1660			hid_process_event(hid,
1661					  field,
1662					  &field->usage[value[n] - min],
1663					  1,
1664					  interrupt);
1665	}
1666
1667	memcpy(field->value, value, count * sizeof(__s32));
1668}
1669
1670/*
1671 * Analyse a received report, and fetch the data from it. The field
1672 * content is stored for next report processing (we do differential
1673 * reporting to the layer).
1674 */
1675static void hid_process_report(struct hid_device *hid,
1676			       struct hid_report *report,
1677			       __u8 *data,
1678			       int interrupt)
1679{
1680	unsigned int a;
1681	struct hid_field_entry *entry;
1682	struct hid_field *field;
1683
1684	/* first retrieve all incoming values in data */
1685	for (a = 0; a < report->maxfield; a++)
1686		hid_input_fetch_field(hid, report->field[a], data);
1687
1688	if (!list_empty(&report->field_entry_list)) {
1689		/* INPUT_REPORT, we have a priority list of fields */
1690		list_for_each_entry(entry,
1691				    &report->field_entry_list,
1692				    list) {
1693			field = entry->field;
1694
1695			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1696				hid_process_event(hid,
1697						  field,
1698						  &field->usage[entry->index],
1699						  field->new_value[entry->index],
1700						  interrupt);
1701			else
1702				hid_input_array_field(hid, field, interrupt);
1703		}
1704
1705		/* we need to do the memcpy at the end for var items */
1706		for (a = 0; a < report->maxfield; a++) {
1707			field = report->field[a];
1708
1709			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1710				memcpy(field->value, field->new_value,
1711				       field->report_count * sizeof(__s32));
1712		}
1713	} else {
1714		/* FEATURE_REPORT, regular processing */
1715		for (a = 0; a < report->maxfield; a++) {
1716			field = report->field[a];
1717
1718			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1719				hid_input_var_field(hid, field, interrupt);
1720			else
1721				hid_input_array_field(hid, field, interrupt);
1722		}
1723	}
1724}
1725
1726/*
1727 * Insert a given usage_index in a field in the list
1728 * of processed usages in the report.
1729 *
1730 * The elements of lower priority score are processed
1731 * first.
1732 */
1733static void __hid_insert_field_entry(struct hid_device *hid,
1734				     struct hid_report *report,
1735				     struct hid_field_entry *entry,
1736				     struct hid_field *field,
1737				     unsigned int usage_index)
1738{
1739	struct hid_field_entry *next;
1740
1741	entry->field = field;
1742	entry->index = usage_index;
1743	entry->priority = field->usages_priorities[usage_index];
1744
1745	/* insert the element at the correct position */
1746	list_for_each_entry(next,
1747			    &report->field_entry_list,
1748			    list) {
1749		/*
1750		 * the priority of our element is strictly higher
1751		 * than the next one, insert it before
1752		 */
1753		if (entry->priority > next->priority) {
1754			list_add_tail(&entry->list, &next->list);
1755			return;
1756		}
1757	}
1758
1759	/* lowest priority score: insert at the end */
1760	list_add_tail(&entry->list, &report->field_entry_list);
1761}
1762
1763static void hid_report_process_ordering(struct hid_device *hid,
1764					struct hid_report *report)
1765{
1766	struct hid_field *field;
1767	struct hid_field_entry *entries;
1768	unsigned int a, u, usages;
1769	unsigned int count = 0;
1770
1771	/* count the number of individual fields in the report */
1772	for (a = 0; a < report->maxfield; a++) {
1773		field = report->field[a];
1774
1775		if (field->flags & HID_MAIN_ITEM_VARIABLE)
1776			count += field->report_count;
1777		else
1778			count++;
1779	}
1780
1781	/* allocate the memory to process the fields */
1782	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1783	if (!entries)
1784		return;
1785
1786	report->field_entries = entries;
1787
1788	/*
1789	 * walk through all fields in the report and
1790	 * store them by priority order in report->field_entry_list
1791	 *
1792	 * - Var elements are individualized (field + usage_index)
1793	 * - Arrays are taken as one, we cannot choose an order for them
1794	 */
1795	usages = 0;
1796	for (a = 0; a < report->maxfield; a++) {
1797		field = report->field[a];
1798
1799		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1800			for (u = 0; u < field->report_count; u++) {
1801				__hid_insert_field_entry(hid, report,
1802							 &entries[usages],
1803							 field, u);
1804				usages++;
1805			}
1806		} else {
1807			__hid_insert_field_entry(hid, report, &entries[usages],
1808						 field, 0);
1809			usages++;
1810		}
1811	}
1812}
1813
1814static void hid_process_ordering(struct hid_device *hid)
1815{
1816	struct hid_report *report;
1817	struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1818
1819	list_for_each_entry(report, &report_enum->report_list, list)
1820		hid_report_process_ordering(hid, report);
1821}
1822
1823/*
1824 * Output the field into the report.
1825 */
1826
1827static void hid_output_field(const struct hid_device *hid,
1828			     struct hid_field *field, __u8 *data)
1829{
1830	unsigned count = field->report_count;
1831	unsigned offset = field->report_offset;
1832	unsigned size = field->report_size;
1833	unsigned n;
1834
1835	for (n = 0; n < count; n++) {
1836		if (field->logical_minimum < 0)	/* signed values */
1837			implement(hid, data, offset + n * size, size,
1838				  s32ton(field->value[n], size));
1839		else				/* unsigned values */
1840			implement(hid, data, offset + n * size, size,
1841				  field->value[n]);
1842	}
1843}
1844
1845/*
1846 * Compute the size of a report.
1847 */
1848static size_t hid_compute_report_size(struct hid_report *report)
1849{
1850	if (report->size)
1851		return ((report->size - 1) >> 3) + 1;
1852
1853	return 0;
1854}
1855
1856/*
1857 * Create a report. 'data' has to be allocated using
1858 * hid_alloc_report_buf() so that it has proper size.
1859 */
1860
1861void hid_output_report(struct hid_report *report, __u8 *data)
1862{
1863	unsigned n;
1864
1865	if (report->id > 0)
1866		*data++ = report->id;
1867
1868	memset(data, 0, hid_compute_report_size(report));
1869	for (n = 0; n < report->maxfield; n++)
1870		hid_output_field(report->device, report->field[n], data);
1871}
1872EXPORT_SYMBOL_GPL(hid_output_report);
1873
1874/*
1875 * Allocator for buffer that is going to be passed to hid_output_report()
1876 */
1877u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1878{
1879	/*
1880	 * 7 extra bytes are necessary to achieve proper functionality
1881	 * of implement() working on 8 byte chunks
1882	 */
1883
1884	u32 len = hid_report_len(report) + 7;
1885
1886	return kzalloc(len, flags);
1887}
1888EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
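
/*
 * Typical use of the two helpers above (illustrative sketch, error paths
 * trimmed; hid_hw_output_report() is only one possible way to send the
 * buffer):
 *
 *	u8 *buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	hid_output_report(report, buf);
 *	hid_hw_output_report(hdev, buf, hid_report_len(report));
 *	kfree(buf);
 */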
1889
1890/*
1891 * Set a field value. The report this field belongs to has to be
1892 * created and transferred to the device, to set this value in the
1893 * device.
1894 */
1895
1896int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1897{
1898	unsigned size;
1899
1900	if (!field)
1901		return -1;
1902
1903	size = field->report_size;
1904
1905	hid_dump_input(field->report->device, field->usage + offset, value);
1906
1907	if (offset >= field->report_count) {
1908		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1909				offset, field->report_count);
1910		return -1;
1911	}
1912	if (field->logical_minimum < 0) {
1913		if (value != snto32(s32ton(value, size), size)) {
1914			hid_err(field->report->device, "value %d is out of range\n", value);
1915			return -1;
1916		}
1917	}
1918	field->value[offset] = value;
1919	return 0;
1920}
1921EXPORT_SYMBOL_GPL(hid_set_field);
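
/*
 * For illustration only -- the usual pattern around hid_set_field():
 * update the cached value of a field, then ask the transport to push
 * the whole report to the device.  The example_* name is hypothetical;
 * 'field' would typically come from hid_find_field() below.
 */
static void example_update_field(struct hid_device *hdev,
				 struct hid_field *field, __s32 value)
{
	if (hid_set_field(field, 0, value))	/* usage index 0 */
		return;

	hid_hw_request(hdev, field->report, HID_REQ_SET_REPORT);
}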
1922
1923struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type,
1924				 unsigned int application, unsigned int usage)
1925{
1926	struct list_head *report_list = &hdev->report_enum[report_type].report_list;
1927	struct hid_report *report;
1928	int i, j;
1929
1930	list_for_each_entry(report, report_list, list) {
1931		if (report->application != application)
1932			continue;
1933
1934		for (i = 0; i < report->maxfield; i++) {
1935			struct hid_field *field = report->field[i];
1936
1937			for (j = 0; j < field->maxusage; j++) {
1938				if (field->usage[j].hid == usage)
1939					return field;
1940			}
1941		}
1942	}
1943
1944	return NULL;
1945}
1946EXPORT_SYMBOL_GPL(hid_find_field);
1947
1948static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1949		const u8 *data)
1950{
1951	struct hid_report *report;
1952	unsigned int n = 0;	/* Normally report number is 0 */
1953
1954	/* Device uses numbered reports, data[0] is report number */
1955	if (report_enum->numbered)
1956		n = *data;
1957
1958	report = report_enum->report_id_hash[n];
1959	if (report == NULL)
1960		dbg_hid("undefined report_id %u received\n", n);
1961
1962	return report;
1963}
1964
1965/*
1966 * Implement a generic .request() callback, using .raw_request().
1967 * DO NOT USE in hid drivers directly; use hid_hw_request() instead.
1968 */
1969int __hid_request(struct hid_device *hid, struct hid_report *report,
1970		enum hid_class_request reqtype)
1971{
1972	char *buf;
1973	int ret;
1974	u32 len;
1975
1976	buf = hid_alloc_report_buf(report, GFP_KERNEL);
1977	if (!buf)
1978		return -ENOMEM;
1979
1980	len = hid_report_len(report);
1981
1982	if (reqtype == HID_REQ_SET_REPORT)
1983		hid_output_report(report, buf);
1984
1985	ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
1986					  report->type, reqtype);
1987	if (ret < 0) {
1988		dbg_hid("unable to complete request: %d\n", ret);
1989		goto out;
1990	}
1991
1992	if (reqtype == HID_REQ_GET_REPORT)
1993		hid_input_report(hid, report->type, buf, ret, 0);
1994
1995	ret = 0;
1996
1997out:
1998	kfree(buf);
1999	return ret;
2000}
2001EXPORT_SYMBOL_GPL(__hid_request);
2002
2003int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2004			 int interrupt)
2005{
2006	struct hid_report_enum *report_enum = hid->report_enum + type;
2007	struct hid_report *report;
2008	struct hid_driver *hdrv;
2009	int max_buffer_size = HID_MAX_BUFFER_SIZE;
2010	u32 rsize, csize = size;
2011	u8 *cdata = data;
2012	int ret = 0;
2013
2014	report = hid_get_report(report_enum, data);
2015	if (!report)
2016		goto out;
2017
2018	if (report_enum->numbered) {
2019		cdata++;
2020		csize--;
2021	}
2022
2023	rsize = hid_compute_report_size(report);
2024
2025	if (hid->ll_driver->max_buffer_size)
2026		max_buffer_size = hid->ll_driver->max_buffer_size;
2027
2028	if (report_enum->numbered && rsize >= max_buffer_size)
2029		rsize = max_buffer_size - 1;
2030	else if (rsize > max_buffer_size)
2031		rsize = max_buffer_size;
2032
2033	if (csize < rsize) {
2034		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2035				csize, rsize);
2036		memset(cdata + csize, 0, rsize - csize);
2037	}
2038
2039	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2040		hid->hiddev_report_event(hid, report);
2041	if (hid->claimed & HID_CLAIMED_HIDRAW) {
2042		ret = hidraw_report_event(hid, data, size);
2043		if (ret)
2044			goto out;
2045	}
2046
2047	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2048		hid_process_report(hid, report, cdata, interrupt);
2049		hdrv = hid->driver;
2050		if (hdrv && hdrv->report)
2051			hdrv->report(hid, report);
2052	}
2053
2054	if (hid->claimed & HID_CLAIMED_INPUT)
2055		hidinput_report_event(hid, report);
2056out:
2057	return ret;
2058}
2059EXPORT_SYMBOL_GPL(hid_report_raw_event);
2060
2061
2062static int __hid_input_report(struct hid_device *hid, enum hid_report_type type,
2063			      u8 *data, u32 size, int interrupt, u64 source, bool from_bpf,
2064			      bool lock_already_taken)
2065{
2066	struct hid_report_enum *report_enum;
2067	struct hid_driver *hdrv;
2068	struct hid_report *report;
2069	int ret = 0;
2070
2071	if (!hid)
2072		return -ENODEV;
2073
2074	ret = down_trylock(&hid->driver_input_lock);
2075	if (lock_already_taken && !ret) {
2076		up(&hid->driver_input_lock);
2077		return -EINVAL;
2078	} else if (!lock_already_taken && ret) {
2079		return -EBUSY;
2080	}
2081
2082	if (!hid->driver) {
2083		ret = -ENODEV;
2084		goto unlock;
2085	}
2086	report_enum = hid->report_enum + type;
2087	hdrv = hid->driver;
2088
2089	data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt, source, from_bpf);
2090	if (IS_ERR(data)) {
2091		ret = PTR_ERR(data);
2092		goto unlock;
2093	}
2094
2095	if (!size) {
2096		dbg_hid("empty report\n");
2097		ret = -1;
2098		goto unlock;
2099	}
2100
2101	/* Avoid unnecessary overhead if debugfs is disabled */
2102	if (!list_empty(&hid->debug_list))
2103		hid_dump_report(hid, type, data, size);
2104
2105	report = hid_get_report(report_enum, data);
2106
2107	if (!report) {
2108		ret = -1;
2109		goto unlock;
2110	}
2111
2112	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2113		ret = hdrv->raw_event(hid, report, data, size);
2114		if (ret < 0)
2115			goto unlock;
2116	}
2117
2118	ret = hid_report_raw_event(hid, type, data, size, interrupt);
2119
2120unlock:
2121	if (!lock_already_taken)
2122		up(&hid->driver_input_lock);
2123	return ret;
2124}
2125
2126/**
2127 * hid_input_report - report data from lower layer (usb, bt...)
2128 *
2129 * @hid: hid device
2130 * @type: HID report type (HID_*_REPORT)
2131 * @data: report contents
2132 * @size: size of data parameter
2133 * @interrupt: distinguish between interrupt and control transfers
2134 *
2135 * This is the data entry point for lower layers.
2136 */
2137int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2138		     int interrupt)
2139{
2140	return __hid_input_report(hid, type, data, size, interrupt, 0,
2141				  false, /* from_bpf */
2142				  false /* lock_already_taken */);
2143}
2144EXPORT_SYMBOL_GPL(hid_input_report);
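
/*
 * For illustration only -- how a transport (ll) driver would typically
 * feed incoming data into the core from its interrupt/completion path.
 * The example_* name and the buffer handling are hypothetical.
 */
static void example_transport_irq(struct hid_device *hid, u8 *data, u32 len)
{
	/* the final '1' marks an interrupt (asynchronous) transfer */
	hid_input_report(hid, HID_INPUT_REPORT, data, len, 1);
}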
2145
2146bool hid_match_one_id(const struct hid_device *hdev,
2147		      const struct hid_device_id *id)
2148{
2149	return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2150		(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2151		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2152		(id->product == HID_ANY_ID || id->product == hdev->product);
2153}
2154
2155const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2156		const struct hid_device_id *id)
2157{
2158	for (; id->bus; id++)
2159		if (hid_match_one_id(hdev, id))
2160			return id;
2161
2162	return NULL;
2163}
2164EXPORT_SYMBOL_GPL(hid_match_id);
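
/*
 * For illustration only -- a match table as consumed by hid_match_id()
 * and hid_match_one_id().  HID_ANY_ID/HID_GROUP_ANY entries act as
 * wildcards; the vendor/product numbers below are hypothetical.
 */
static const struct hid_device_id example_match_table[] = {
	/* one specific USB device */
	{ HID_USB_DEVICE(0x1234, 0x5678) },
	/* any product of that vendor on Bluetooth */
	{ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_ANY, 0x1234, HID_ANY_ID) },
	{ }	/* a zero .bus terminates the table */
};
MODULE_DEVICE_TABLE(hid, example_match_table);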
2165
2166static const struct hid_device_id hid_hiddev_list[] = {
2167	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2168	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2169	{ }
2170};
2171
2172static bool hid_hiddev(struct hid_device *hdev)
2173{
2174	return !!hid_match_id(hdev, hid_hiddev_list);
2175}
2176
2177
2178static ssize_t
2179read_report_descriptor(struct file *filp, struct kobject *kobj,
2180		struct bin_attribute *attr,
2181		char *buf, loff_t off, size_t count)
2182{
2183	struct device *dev = kobj_to_dev(kobj);
2184	struct hid_device *hdev = to_hid_device(dev);
2185
2186	if (off >= hdev->rsize)
2187		return 0;
2188
2189	if (off + count > hdev->rsize)
2190		count = hdev->rsize - off;
2191
2192	memcpy(buf, hdev->rdesc + off, count);
2193
2194	return count;
2195}
2196
2197static ssize_t
2198show_country(struct device *dev, struct device_attribute *attr,
2199		char *buf)
2200{
2201	struct hid_device *hdev = to_hid_device(dev);
2202
2203	return sprintf(buf, "%02x\n", hdev->country & 0xff);
2204}
2205
2206static struct bin_attribute dev_bin_attr_report_desc = {
2207	.attr = { .name = "report_descriptor", .mode = 0444 },
2208	.read = read_report_descriptor,
2209	.size = HID_MAX_DESCRIPTOR_SIZE,
2210};
2211
2212static const struct device_attribute dev_attr_country = {
2213	.attr = { .name = "country", .mode = 0444 },
2214	.show = show_country,
2215};
2216
2217int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2218{
2219	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2220		"Joystick", "Gamepad", "Keyboard", "Keypad",
2221		"Multi-Axis Controller"
2222	};
2223	const char *type, *bus;
2224	char buf[64] = "";
2225	unsigned int i;
2226	int len;
2227	int ret;
2228
2229	ret = hid_bpf_connect_device(hdev);
2230	if (ret)
2231		return ret;
2232
2233	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2234		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2235	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2236		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2237	if (hdev->bus != BUS_USB)
2238		connect_mask &= ~HID_CONNECT_HIDDEV;
2239	if (hid_hiddev(hdev))
2240		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2241
2242	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2243				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2244		hdev->claimed |= HID_CLAIMED_INPUT;
2245
2246	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2247			!hdev->hiddev_connect(hdev,
2248				connect_mask & HID_CONNECT_HIDDEV_FORCE))
2249		hdev->claimed |= HID_CLAIMED_HIDDEV;
2250	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2251		hdev->claimed |= HID_CLAIMED_HIDRAW;
2252
2253	if (connect_mask & HID_CONNECT_DRIVER)
2254		hdev->claimed |= HID_CLAIMED_DRIVER;
2255
2256	/* Drivers with the ->raw_event callback set are not required to connect
2257	 * to any other listener. */
2258	if (!hdev->claimed && !hdev->driver->raw_event) {
2259		hid_err(hdev, "device has no listeners, quitting\n");
2260		return -ENODEV;
2261	}
2262
2263	hid_process_ordering(hdev);
2264
2265	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2266			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2267		hdev->ff_init(hdev);
2268
2269	len = 0;
2270	if (hdev->claimed & HID_CLAIMED_INPUT)
2271		len += sprintf(buf + len, "input");
2272	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2273		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2274				((struct hiddev *)hdev->hiddev)->minor);
2275	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2276		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2277				((struct hidraw *)hdev->hidraw)->minor);
2278
2279	type = "Device";
2280	for (i = 0; i < hdev->maxcollection; i++) {
2281		struct hid_collection *col = &hdev->collection[i];
2282		if (col->type == HID_COLLECTION_APPLICATION &&
2283		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2284		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2285			type = types[col->usage & 0xffff];
2286			break;
2287		}
2288	}
2289
2290	switch (hdev->bus) {
2291	case BUS_USB:
2292		bus = "USB";
2293		break;
2294	case BUS_BLUETOOTH:
2295		bus = "BLUETOOTH";
2296		break;
2297	case BUS_I2C:
2298		bus = "I2C";
2299		break;
2300	case BUS_VIRTUAL:
2301		bus = "VIRTUAL";
2302		break;
2303	case BUS_INTEL_ISHTP:
2304	case BUS_AMD_SFH:
2305		bus = "SENSOR HUB";
2306		break;
2307	default:
2308		bus = "<UNKNOWN>";
2309	}
2310
2311	ret = device_create_file(&hdev->dev, &dev_attr_country);
2312	if (ret)
2313		hid_warn(hdev,
2314			 "can't create sysfs country code attribute err: %d\n", ret);
2315
2316	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2317		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2318		 type, hdev->name, hdev->phys);
2319
2320	return 0;
2321}
2322EXPORT_SYMBOL_GPL(hid_connect);
2323
2324void hid_disconnect(struct hid_device *hdev)
2325{
2326	device_remove_file(&hdev->dev, &dev_attr_country);
2327	if (hdev->claimed & HID_CLAIMED_INPUT)
2328		hidinput_disconnect(hdev);
2329	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2330		hdev->hiddev_disconnect(hdev);
2331	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2332		hidraw_disconnect(hdev);
2333	hdev->claimed = 0;
2334
2335	hid_bpf_disconnect_device(hdev);
2336}
2337EXPORT_SYMBOL_GPL(hid_disconnect);
2338
2339/**
2340 * hid_hw_start - start underlying HW
2341 * @hdev: hid device
2342 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2343 *
2344 * Call this in probe function *after* hid_parse. This will set up HW
2345 * buffers and start the device (if not deferred to device open).
2346 * hid_hw_stop must be called if this was successful.
2347 */
2348int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2349{
2350	int error;
2351
2352	error = hdev->ll_driver->start(hdev);
2353	if (error)
2354		return error;
2355
2356	if (connect_mask) {
2357		error = hid_connect(hdev, connect_mask);
2358		if (error) {
2359			hdev->ll_driver->stop(hdev);
2360			return error;
2361		}
2362	}
2363
2364	return 0;
2365}
2366EXPORT_SYMBOL_GPL(hid_hw_start);
2367
2368/**
2369 * hid_hw_stop - stop underlying HW
2370 * @hdev: hid device
2371 *
2372 * This is usually called from the remove function, or from probe when
2373 * something failed after hid_hw_start had already been called.
2374 */
2375void hid_hw_stop(struct hid_device *hdev)
2376{
2377	hid_disconnect(hdev);
2378	hdev->ll_driver->stop(hdev);
2379}
2380EXPORT_SYMBOL_GPL(hid_hw_stop);
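
/*
 * For illustration only -- the default shape of a HID driver's probe()
 * and remove() callbacks built on hid_hw_start()/hid_hw_stop().  All
 * example_* names are hypothetical.
 */
static int example_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);		/* parse the report descriptor first */
	if (ret)
		return ret;

	return hid_hw_start(hdev, HID_CONNECT_DEFAULT);
}

static void example_remove(struct hid_device *hdev)
{
	hid_hw_stop(hdev);
}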
2381
2382/**
2383 * hid_hw_open - signal underlying HW to start delivering events
2384 * @hdev: hid device
2385 *
2386 * Tell underlying HW to start delivering events from the device.
2387 * This function should be called sometime after successful call
2388 * to hid_hw_start().
2389 */
2390int hid_hw_open(struct hid_device *hdev)
2391{
2392	int ret;
2393
2394	ret = mutex_lock_killable(&hdev->ll_open_lock);
2395	if (ret)
2396		return ret;
2397
2398	if (!hdev->ll_open_count++) {
2399		ret = hdev->ll_driver->open(hdev);
2400		if (ret)
2401			hdev->ll_open_count--;
2402	}
2403
2404	mutex_unlock(&hdev->ll_open_lock);
2405	return ret;
2406}
2407EXPORT_SYMBOL_GPL(hid_hw_open);
2408
2409/**
2410 * hid_hw_close - signal underlying HW to stop delivering events
2411 *
2412 * @hdev: hid device
2413 *
2414 * This function indicates that we are not interested in the events
2415 * from this device anymore. Delivery of events may or may not stop,
2416 * depending on the number of users still outstanding.
2417 */
2418void hid_hw_close(struct hid_device *hdev)
2419{
2420	mutex_lock(&hdev->ll_open_lock);
2421	if (!--hdev->ll_open_count)
2422		hdev->ll_driver->close(hdev);
2423	mutex_unlock(&hdev->ll_open_lock);
2424}
2425EXPORT_SYMBOL_GPL(hid_hw_close);
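
/*
 * For illustration only -- the usual open()/close() pairing as seen from
 * an input handler: events are only requested from the transport while
 * at least one user holds the device open.  The example_* names are
 * hypothetical; hidinput uses essentially this pattern.
 */
static int example_input_open(struct input_dev *dev)
{
	struct hid_device *hdev = input_get_drvdata(dev);

	return hid_hw_open(hdev);
}

static void example_input_close(struct input_dev *dev)
{
	struct hid_device *hdev = input_get_drvdata(dev);

	hid_hw_close(hdev);
}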
2426
2427/**
2428 * hid_hw_request - send report request to device
2429 *
2430 * @hdev: hid device
2431 * @report: report to send
2432 * @reqtype: hid request type
2433 */
2434void hid_hw_request(struct hid_device *hdev,
2435		    struct hid_report *report, enum hid_class_request reqtype)
2436{
2437	if (hdev->ll_driver->request)
2438		return hdev->ll_driver->request(hdev, report, reqtype);
2439
2440	__hid_request(hdev, report, reqtype);
2441}
2442EXPORT_SYMBOL_GPL(hid_hw_request);
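
/*
 * For illustration only -- asking the device to resend a feature report.
 * The retrieved data is typically fed back by the transport through
 * hid_input_report().  The report ID used below is hypothetical.
 */
static void example_refresh_feature(struct hid_device *hdev)
{
	struct hid_report *report;

	/* 0x01 is a hypothetical report ID */
	report = hdev->report_enum[HID_FEATURE_REPORT].report_id_hash[0x01];
	if (report)
		hid_hw_request(hdev, report, HID_REQ_GET_REPORT);
}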
2443
2444int __hid_hw_raw_request(struct hid_device *hdev,
2445			 unsigned char reportnum, __u8 *buf,
2446			 size_t len, enum hid_report_type rtype,
2447			 enum hid_class_request reqtype,
2448			 u64 source, bool from_bpf)
2449{
2450	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2451	int ret;
2452
2453	if (hdev->ll_driver->max_buffer_size)
2454		max_buffer_size = hdev->ll_driver->max_buffer_size;
2455
2456	if (len < 1 || len > max_buffer_size || !buf)
2457		return -EINVAL;
2458
2459	ret = dispatch_hid_bpf_raw_requests(hdev, reportnum, buf, len, rtype,
2460					    reqtype, source, from_bpf);
2461	if (ret)
2462		return ret;
2463
2464	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2465					    rtype, reqtype);
2466}
2467
2468/**
2469 * hid_hw_raw_request - send report request to device
2470 *
2471 * @hdev: hid device
2472 * @reportnum: report ID
2473 * @buf: in/out data to transfer
2474 * @len: length of buf
2475 * @rtype: HID report type
2476 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2477 *
2478 * Return: count of data transferred, negative if error
2479 *
2480 * Same behavior as hid_hw_request, but with raw buffers instead.
2481 */
2482int hid_hw_raw_request(struct hid_device *hdev,
2483		       unsigned char reportnum, __u8 *buf,
2484		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2485{
2486	return __hid_hw_raw_request(hdev, reportnum, buf, len, rtype, reqtype, 0, false);
2487}
2488EXPORT_SYMBOL_GPL(hid_hw_raw_request);
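
/*
 * For illustration only -- a synchronous feature-report read using the
 * raw interface.  The buffer should be heap-allocated, since not every
 * transport can use stack memory; the report ID and length below are
 * hypothetical.
 */
static int example_get_feature(struct hid_device *hdev)
{
	const u8 report_id = 0x01;	/* hypothetical report ID */
	u8 *buf;
	int ret;

	buf = kzalloc(8, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hid_hw_raw_request(hdev, report_id, buf, 8,
				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
	/* on success, ret is the number of bytes now sitting in buf */

	kfree(buf);
	return ret < 0 ? ret : 0;
}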
2489
2490int __hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len, u64 source,
2491			   bool from_bpf)
2492{
2493	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2494	int ret;
2495
2496	if (hdev->ll_driver->max_buffer_size)
2497		max_buffer_size = hdev->ll_driver->max_buffer_size;
2498
2499	if (len < 1 || len > max_buffer_size || !buf)
2500		return -EINVAL;
2501
2502	ret = dispatch_hid_bpf_output_report(hdev, buf, len, source, from_bpf);
2503	if (ret)
2504		return ret;
2505
2506	if (hdev->ll_driver->output_report)
2507		return hdev->ll_driver->output_report(hdev, buf, len);
2508
2509	return -ENOSYS;
2510}
2511
2512/**
2513 * hid_hw_output_report - send output report to device
2514 *
2515 * @hdev: hid device
2516 * @buf: raw data to transfer
2517 * @len: length of buf
2518 *
2519 * Return: count of data transferred, negative if error
2520 */
2521int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2522{
2523	return __hid_hw_output_report(hdev, buf, len, 0, false);
2524}
2525EXPORT_SYMBOL_GPL(hid_hw_output_report);
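
/*
 * For illustration only -- sending a fixed output report over the
 * transport's output channel (e.g. the USB interrupt OUT endpoint).
 * The payload below is hypothetical; by the usual convention buf[0]
 * carries the report ID (0 for unnumbered devices).
 */
static int example_send_output(struct hid_device *hdev)
{
	static const u8 payload[] = { 0x00, 0x01, 0x02, 0x03 };
	u8 *buf;
	int ret;

	buf = kmemdup(payload, sizeof(payload), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = hid_hw_output_report(hdev, buf, sizeof(payload));
	kfree(buf);
	return ret < 0 ? ret : 0;
}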
2526
2527#ifdef CONFIG_PM
2528int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2529{
2530	if (hdev->driver && hdev->driver->suspend)
2531		return hdev->driver->suspend(hdev, state);
2532
2533	return 0;
2534}
2535EXPORT_SYMBOL_GPL(hid_driver_suspend);
2536
2537int hid_driver_reset_resume(struct hid_device *hdev)
2538{
2539	if (hdev->driver && hdev->driver->reset_resume)
2540		return hdev->driver->reset_resume(hdev);
2541
2542	return 0;
2543}
2544EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2545
2546int hid_driver_resume(struct hid_device *hdev)
2547{
2548	if (hdev->driver && hdev->driver->resume)
2549		return hdev->driver->resume(hdev);
2550
2551	return 0;
2552}
2553EXPORT_SYMBOL_GPL(hid_driver_resume);
2554#endif /* CONFIG_PM */
2555
2556struct hid_dynid {
2557	struct list_head list;
2558	struct hid_device_id id;
2559};
2560
2561/**
2562 * new_id_store - add a new HID device ID to this driver and re-probe devices
2563 * @drv: target device driver
2564 * @buf: buffer for scanning device ID data
2565 * @count: input size
2566 *
2567 * Adds a new dynamic hid device ID to this driver,
2568 * and causes the driver to probe for all devices again.
2569 */
2570static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2571		size_t count)
2572{
2573	struct hid_driver *hdrv = to_hid_driver(drv);
2574	struct hid_dynid *dynid;
2575	__u32 bus, vendor, product;
2576	unsigned long driver_data = 0;
2577	int ret;
2578
2579	ret = sscanf(buf, "%x %x %x %lx",
2580			&bus, &vendor, &product, &driver_data);
2581	if (ret < 3)
2582		return -EINVAL;
2583
2584	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2585	if (!dynid)
2586		return -ENOMEM;
2587
2588	dynid->id.bus = bus;
2589	dynid->id.group = HID_GROUP_ANY;
2590	dynid->id.vendor = vendor;
2591	dynid->id.product = product;
2592	dynid->id.driver_data = driver_data;
2593
2594	spin_lock(&hdrv->dyn_lock);
2595	list_add_tail(&dynid->list, &hdrv->dyn_list);
2596	spin_unlock(&hdrv->dyn_lock);
2597
2598	ret = driver_attach(&hdrv->driver);
2599
2600	return ret ? : count;
2601}
2602static DRIVER_ATTR_WO(new_id);
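
/*
 * For illustration only -- the new_id attribute is written from user
 * space as "<bus> <vendor> <product> [driver_data]", all in hex, matching
 * the sscanf() format above.  For a hypothetical USB device 1234:5678
 * (BUS_USB is 0x03):
 *
 *	echo "0003 1234 5678" > /sys/bus/hid/drivers/<driver>/new_id
 *
 * after which driver_attach() re-probes unbound devices against the new
 * dynamic ID.
 */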
2603
2604static struct attribute *hid_drv_attrs[] = {
2605	&driver_attr_new_id.attr,
2606	NULL,
2607};
2608ATTRIBUTE_GROUPS(hid_drv);
2609
2610static void hid_free_dynids(struct hid_driver *hdrv)
2611{
2612	struct hid_dynid *dynid, *n;
2613
2614	spin_lock(&hdrv->dyn_lock);
2615	list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2616		list_del(&dynid->list);
2617		kfree(dynid);
2618	}
2619	spin_unlock(&hdrv->dyn_lock);
2620}
2621
2622const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2623					     struct hid_driver *hdrv)
2624{
2625	struct hid_dynid *dynid;
2626
2627	spin_lock(&hdrv->dyn_lock);
2628	list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2629		if (hid_match_one_id(hdev, &dynid->id)) {
2630			spin_unlock(&hdrv->dyn_lock);
2631			return &dynid->id;
2632		}
2633	}
2634	spin_unlock(&hdrv->dyn_lock);
2635
2636	return hid_match_id(hdev, hdrv->id_table);
2637}
2638EXPORT_SYMBOL_GPL(hid_match_device);
2639
2640static int hid_bus_match(struct device *dev, const struct device_driver *drv)
2641{
2642	struct hid_driver *hdrv = to_hid_driver(drv);
2643	struct hid_device *hdev = to_hid_device(dev);
2644
2645	return hid_match_device(hdev, hdrv) != NULL;
2646}
2647
2648/**
2649 * hid_compare_device_paths - check if both devices share the same path
2650 * @hdev_a: hid device
2651 * @hdev_b: hid device
2652 * @separator: char to use as separator
2653 *
2654 * Check if two devices share the same path up to the last occurrence of
2655 * the separator char. Both paths must exist (i.e., zero-length paths
2656 * don't match).
2657 */
2658bool hid_compare_device_paths(struct hid_device *hdev_a,
2659			      struct hid_device *hdev_b, char separator)
2660{
2661	int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2662	int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2663
2664	if (n1 != n2 || n1 <= 0 || n2 <= 0)
2665		return false;
2666
2667	return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2668}
2669EXPORT_SYMBOL_GPL(hid_compare_device_paths);
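
/*
 * For illustration only -- two HID interfaces of the same physical
 * device usually differ only after the last separator in ->phys, e.g.
 * "usb-0000:00:14.0-2/input0" vs "usb-0000:00:14.0-2/input1", so a
 * driver can pair them like this (the example_* name is hypothetical):
 */
static bool example_same_physical_device(struct hid_device *a,
					 struct hid_device *b)
{
	return hid_compare_device_paths(a, b, '/');
}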
2670
2671static bool hid_check_device_match(struct hid_device *hdev,
2672				   struct hid_driver *hdrv,
2673				   const struct hid_device_id **id)
2674{
2675	*id = hid_match_device(hdev, hdrv);
2676	if (!*id)
2677		return false;
2678
2679	if (hdrv->match)
2680		return hdrv->match(hdev, hid_ignore_special_drivers);
2681
2682	/*
2683	 * hid-generic implements .match(), so we must be dealing with a
2684	 * different HID driver here, and can simply check if
2685	 * hid_ignore_special_drivers or HID_QUIRK_IGNORE_SPECIAL_DRIVER
2686	 * are set or not.
2687	 */
2688	return !hid_ignore_special_drivers && !(hdev->quirks & HID_QUIRK_IGNORE_SPECIAL_DRIVER);
2689}
2690
2691static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2692{
2693	const struct hid_device_id *id;
2694	int ret;
2695
2696	if (!hdev->bpf_rsize) {
2697		/* in case a bpf program gets detached, we need to free the old one */
2698		hid_free_bpf_rdesc(hdev);
2699
2700		/* keep this around so we know we called it once */
2701		hdev->bpf_rsize = hdev->dev_rsize;
2702
2703		/* call_hid_bpf_rdesc_fixup will always return a valid pointer */
2704		hdev->bpf_rdesc = call_hid_bpf_rdesc_fixup(hdev, hdev->dev_rdesc,
2705							   &hdev->bpf_rsize);
2706	}
2707
2708	if (!hid_check_device_match(hdev, hdrv, &id))
2709		return -ENODEV;
2710
2711	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2712	if (!hdev->devres_group_id)
2713		return -ENOMEM;
2714
2715	/* reset the quirks that have been previously set */
2716	hdev->quirks = hid_lookup_quirk(hdev);
2717	hdev->driver = hdrv;
2718
2719	if (hdrv->probe) {
2720		ret = hdrv->probe(hdev, id);
2721	} else { /* default probe */
2722		ret = hid_open_report(hdev);
2723		if (!ret)
2724			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2725	}
2726
2727	/*
2728	 * Note that we are not closing the devres group opened above so
2729	 * even resources that were attached to the device after probe is
2730	 * run are released when hid_device_remove() is executed. This is
2731	 * needed as some drivers would allocate additional resources,
2732	 * for example when updating firmware.
2733	 */
2734
2735	if (ret) {
2736		devres_release_group(&hdev->dev, hdev->devres_group_id);
2737		hid_close_report(hdev);
2738		hdev->driver = NULL;
2739	}
2740
2741	return ret;
2742}
2743
2744static int hid_device_probe(struct device *dev)
2745{
2746	struct hid_device *hdev = to_hid_device(dev);
2747	struct hid_driver *hdrv = to_hid_driver(dev->driver);
2748	int ret = 0;
2749
2750	if (down_interruptible(&hdev->driver_input_lock))
2751		return -EINTR;
2752
2753	hdev->io_started = false;
2754	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2755
2756	if (!hdev->driver)
2757		ret = __hid_device_probe(hdev, hdrv);
2758
2759	if (!hdev->io_started)
2760		up(&hdev->driver_input_lock);
2761
2762	return ret;
2763}
2764
2765static void hid_device_remove(struct device *dev)
2766{
2767	struct hid_device *hdev = to_hid_device(dev);
2768	struct hid_driver *hdrv;
2769
2770	down(&hdev->driver_input_lock);
2771	hdev->io_started = false;
2772
2773	hdrv = hdev->driver;
2774	if (hdrv) {
2775		if (hdrv->remove)
2776			hdrv->remove(hdev);
2777		else /* default remove */
2778			hid_hw_stop(hdev);
2779
2780		/* Release all devres resources allocated by the driver */
2781		devres_release_group(&hdev->dev, hdev->devres_group_id);
2782
2783		hid_close_report(hdev);
2784		hdev->driver = NULL;
2785	}
2786
2787	if (!hdev->io_started)
2788		up(&hdev->driver_input_lock);
2789}
2790
2791static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2792			     char *buf)
2793{
2794	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2795
2796	return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2797			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2798}
2799static DEVICE_ATTR_RO(modalias);
2800
2801static struct attribute *hid_dev_attrs[] = {
2802	&dev_attr_modalias.attr,
2803	NULL,
2804};
2805static struct bin_attribute *hid_dev_bin_attrs[] = {
2806	&dev_bin_attr_report_desc,
2807	NULL
2808};
2809static const struct attribute_group hid_dev_group = {
2810	.attrs = hid_dev_attrs,
2811	.bin_attrs = hid_dev_bin_attrs,
2812};
2813__ATTRIBUTE_GROUPS(hid_dev);
2814
2815static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2816{
2817	const struct hid_device *hdev = to_hid_device(dev);
2818
2819	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2820			hdev->bus, hdev->vendor, hdev->product))
2821		return -ENOMEM;
2822
2823	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2824		return -ENOMEM;
2825
2826	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2827		return -ENOMEM;
2828
2829	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2830		return -ENOMEM;
2831
2832	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2833			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
2834		return -ENOMEM;
2835
2836	return 0;
2837}
2838
2839const struct bus_type hid_bus_type = {
2840	.name		= "hid",
2841	.dev_groups	= hid_dev_groups,
2842	.drv_groups	= hid_drv_groups,
2843	.match		= hid_bus_match,
2844	.probe		= hid_device_probe,
2845	.remove		= hid_device_remove,
2846	.uevent		= hid_uevent,
2847};
2848EXPORT_SYMBOL(hid_bus_type);
2849
2850int hid_add_device(struct hid_device *hdev)
2851{
2852	static atomic_t id = ATOMIC_INIT(0);
2853	int ret;
2854
2855	if (WARN_ON(hdev->status & HID_STAT_ADDED))
2856		return -EBUSY;
2857
2858	hdev->quirks = hid_lookup_quirk(hdev);
2859
2860	/* we need to kill them here, otherwise they will stay allocated,
2861	 * waiting for a driver to come */
2862	if (hid_ignore(hdev))
2863		return -ENODEV;
2864
2865	/*
2866	 * Check for the mandatory transport channel.
2867	 */
2868	if (!hdev->ll_driver->raw_request) {
2869		hid_err(hdev, "transport driver missing .raw_request()\n");
2870		return -EINVAL;
2871	}
2872
2873	/*
2874	 * Read the device report descriptor once and use it as a template
2875	 * for the driver-specific modifications.
2876	 */
2877	ret = hdev->ll_driver->parse(hdev);
2878	if (ret)
2879		return ret;
2880	if (!hdev->dev_rdesc)
2881		return -ENODEV;
2882
2883	/*
2884	 * Scan generic devices for group information
2885	 */
2886	if (hid_ignore_special_drivers) {
2887		hdev->group = HID_GROUP_GENERIC;
2888	} else if (!hdev->group &&
2889		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2890		ret = hid_scan_report(hdev);
2891		if (ret)
2892			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2893	}
2894
2895	hdev->id = atomic_inc_return(&id);
2896
2897	/* XXX hack, any other cleaner solution after the driver core
2898	 * is converted to allow more than 20 bytes as the device name? */
2899	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2900		     hdev->vendor, hdev->product, hdev->id);
2901
2902	hid_debug_register(hdev, dev_name(&hdev->dev));
2903	ret = device_add(&hdev->dev);
2904	if (!ret)
2905		hdev->status |= HID_STAT_ADDED;
2906	else
2907		hid_debug_unregister(hdev);
2908
2909	return ret;
2910}
2911EXPORT_SYMBOL_GPL(hid_add_device);
2912
2913/**
2914 * hid_allocate_device - allocate new hid device descriptor
2915 *
2916 * Allocate and initialize a hid device, so that hid_destroy_device can
2917 * later be used to free it.
2918 *
2919 * New hid_device pointer is returned on success, otherwise ERR_PTR encoded
2920 * error value.
2921 */
2922struct hid_device *hid_allocate_device(void)
2923{
2924	struct hid_device *hdev;
2925	int ret = -ENOMEM;
2926
2927	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2928	if (hdev == NULL)
2929		return ERR_PTR(ret);
2930
2931	device_initialize(&hdev->dev);
2932	hdev->dev.release = hid_device_release;
2933	hdev->dev.bus = &hid_bus_type;
2934	device_enable_async_suspend(&hdev->dev);
2935
2936	hid_close_report(hdev);
2937
2938	init_waitqueue_head(&hdev->debug_wait);
2939	INIT_LIST_HEAD(&hdev->debug_list);
2940	spin_lock_init(&hdev->debug_list_lock);
2941	sema_init(&hdev->driver_input_lock, 1);
2942	mutex_init(&hdev->ll_open_lock);
2943	kref_init(&hdev->ref);
2944
2945	ret = hid_bpf_device_init(hdev);
2946	if (ret)
2947		goto out_err;
2948
2949	return hdev;
2950
2951out_err:
2952	hid_destroy_device(hdev);
2953	return ERR_PTR(ret);
2954}
2955EXPORT_SYMBOL_GPL(hid_allocate_device);
2956
2957static void hid_remove_device(struct hid_device *hdev)
2958{
2959	if (hdev->status & HID_STAT_ADDED) {
2960		device_del(&hdev->dev);
2961		hid_debug_unregister(hdev);
2962		hdev->status &= ~HID_STAT_ADDED;
2963	}
2964	hid_free_bpf_rdesc(hdev);
2965	kfree(hdev->dev_rdesc);
2966	hdev->dev_rdesc = NULL;
2967	hdev->dev_rsize = 0;
2968	hdev->bpf_rsize = 0;
2969}
2970
2971/**
2972 * hid_destroy_device - free previously allocated device
2973 *
2974 * @hdev: hid device
2975 *
2976 * If you allocated the hid_device through hid_allocate_device, you must
2977 * only free it with this function.
2978 */
2979void hid_destroy_device(struct hid_device *hdev)
2980{
2981	hid_bpf_destroy_device(hdev);
2982	hid_remove_device(hdev);
2983	put_device(&hdev->dev);
2984}
2985EXPORT_SYMBOL_GPL(hid_destroy_device);
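
/*
 * For illustration only -- the life cycle of a hid_device as seen from a
 * transport driver: allocate, describe, register, and destroy again on
 * error or teardown.  The example_* names, IDs and the (empty) low-level
 * driver are hypothetical; a real transport must fill in its callbacks.
 */
static const struct hid_ll_driver example_ll_driver = {
	/* .start, .stop, .open, .close, .parse, .raw_request, ... */
};

static struct hid_device *example_transport_add(void)
{
	struct hid_device *hdev;
	int ret;

	hdev = hid_allocate_device();
	if (IS_ERR(hdev))
		return hdev;

	hdev->ll_driver = &example_ll_driver;
	hdev->bus     = BUS_VIRTUAL;
	hdev->vendor  = 0x1234;
	hdev->product = 0x5678;
	strscpy(hdev->name, "Example HID Device", sizeof(hdev->name));

	ret = hid_add_device(hdev);
	if (ret) {
		hid_destroy_device(hdev);
		return ERR_PTR(ret);
	}

	return hdev;
}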
2986
2987
2988static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2989{
2990	struct hid_driver *hdrv = data;
2991	struct hid_device *hdev = to_hid_device(dev);
2992
2993	if (hdev->driver == hdrv &&
2994	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
2995	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2996		return device_reprobe(dev);
2997
2998	return 0;
2999}
3000
3001static int __hid_bus_driver_added(struct device_driver *drv, void *data)
3002{
3003	struct hid_driver *hdrv = to_hid_driver(drv);
3004
3005	if (hdrv->match) {
3006		bus_for_each_dev(&hid_bus_type, NULL, hdrv,
3007				 __hid_bus_reprobe_drivers);
3008	}
3009
3010	return 0;
3011}
3012
3013static int __bus_removed_driver(struct device_driver *drv, void *data)
3014{
3015	return bus_rescan_devices(&hid_bus_type);
3016}
3017
3018int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
3019		const char *mod_name)
3020{
3021	int ret;
3022
3023	hdrv->driver.name = hdrv->name;
3024	hdrv->driver.bus = &hid_bus_type;
3025	hdrv->driver.owner = owner;
3026	hdrv->driver.mod_name = mod_name;
3027
3028	INIT_LIST_HEAD(&hdrv->dyn_list);
3029	spin_lock_init(&hdrv->dyn_lock);
3030
3031	ret = driver_register(&hdrv->driver);
3032
3033	if (ret == 0)
3034		bus_for_each_drv(&hid_bus_type, NULL, NULL,
3035				 __hid_bus_driver_added);
3036
3037	return ret;
3038}
3039EXPORT_SYMBOL_GPL(__hid_register_driver);
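
/*
 * For illustration only -- tying the pieces together: a minimal driver
 * declaration using the hypothetical example_* callbacks and match table
 * sketched above, registered through the module_hid_driver() helper.
 */
static struct hid_driver example_hid_driver = {
	.name		= "example-hid",
	.id_table	= example_match_table,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_hid_driver(example_hid_driver);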
3040
3041void hid_unregister_driver(struct hid_driver *hdrv)
3042{
3043	driver_unregister(&hdrv->driver);
3044	hid_free_dynids(hdrv);
3045
3046	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
3047}
3048EXPORT_SYMBOL_GPL(hid_unregister_driver);
3049
3050int hid_check_keys_pressed(struct hid_device *hid)
3051{
3052	struct hid_input *hidinput;
3053	int i;
3054
3055	if (!(hid->claimed & HID_CLAIMED_INPUT))
3056		return 0;
3057
3058	list_for_each_entry(hidinput, &hid->inputs, list) {
3059		for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
3060			if (hidinput->input->key[i])
3061				return 1;
3062	}
3063
3064	return 0;
3065}
3066EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
3067
3068#ifdef CONFIG_HID_BPF
3069static const struct hid_ops __hid_ops = {
3070	.hid_get_report = hid_get_report,
3071	.hid_hw_raw_request = __hid_hw_raw_request,
3072	.hid_hw_output_report = __hid_hw_output_report,
3073	.hid_input_report = __hid_input_report,
3074	.owner = THIS_MODULE,
3075	.bus_type = &hid_bus_type,
3076};
3077#endif
3078
3079static int __init hid_init(void)
3080{
3081	int ret;
3082
3083	ret = bus_register(&hid_bus_type);
3084	if (ret) {
3085		pr_err("can't register hid bus\n");
3086		goto err;
3087	}
3088
3089#ifdef CONFIG_HID_BPF
3090	hid_ops = &__hid_ops;
3091#endif
3092
3093	ret = hidraw_init();
3094	if (ret)
3095		goto err_bus;
3096
3097	hid_debug_init();
3098
3099	return 0;
3100err_bus:
3101	bus_unregister(&hid_bus_type);
3102err:
3103	return ret;
3104}
3105
3106static void __exit hid_exit(void)
3107{
3108#ifdef CONFIG_HID_BPF
3109	hid_ops = NULL;
3110#endif
3111	hid_debug_exit();
3112	hidraw_exit();
3113	bus_unregister(&hid_bus_type);
3114	hid_quirks_exit(HID_BUS_ANY);
3115}
3116
3117module_init(hid_init);
3118module_exit(hid_exit);
3119
3120MODULE_AUTHOR("Andreas Gal");
3121MODULE_AUTHOR("Vojtech Pavlik");
3122MODULE_AUTHOR("Jiri Kosina");
3123MODULE_DESCRIPTION("HID support for Linux");
3124MODULE_LICENSE("GPL");
v6.8
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 *  HID support for Linux
   4 *
   5 *  Copyright (c) 1999 Andreas Gal
   6 *  Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz>
   7 *  Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc
   8 *  Copyright (c) 2006-2012 Jiri Kosina
   9 */
  10
  11/*
  12 */
  13
  14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  15
  16#include <linux/module.h>
  17#include <linux/slab.h>
  18#include <linux/init.h>
  19#include <linux/kernel.h>
  20#include <linux/list.h>
  21#include <linux/mm.h>
  22#include <linux/spinlock.h>
  23#include <asm/unaligned.h>
  24#include <asm/byteorder.h>
  25#include <linux/input.h>
  26#include <linux/wait.h>
  27#include <linux/vmalloc.h>
  28#include <linux/sched.h>
  29#include <linux/semaphore.h>
  30
  31#include <linux/hid.h>
  32#include <linux/hiddev.h>
  33#include <linux/hid-debug.h>
  34#include <linux/hidraw.h>
  35
  36#include "hid-ids.h"
  37
  38/*
  39 * Version Information
  40 */
  41
  42#define DRIVER_DESC "HID core driver"
  43
  44static int hid_ignore_special_drivers = 0;
  45module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600);
  46MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver");
  47
  48/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  49 * Register a new report for a device.
  50 */
  51
  52struct hid_report *hid_register_report(struct hid_device *device,
  53				       enum hid_report_type type, unsigned int id,
  54				       unsigned int application)
  55{
  56	struct hid_report_enum *report_enum = device->report_enum + type;
  57	struct hid_report *report;
  58
  59	if (id >= HID_MAX_IDS)
  60		return NULL;
  61	if (report_enum->report_id_hash[id])
  62		return report_enum->report_id_hash[id];
  63
  64	report = kzalloc(sizeof(struct hid_report), GFP_KERNEL);
  65	if (!report)
  66		return NULL;
  67
  68	if (id != 0)
  69		report_enum->numbered = 1;
  70
  71	report->id = id;
  72	report->type = type;
  73	report->size = 0;
  74	report->device = device;
  75	report->application = application;
  76	report_enum->report_id_hash[id] = report;
  77
  78	list_add_tail(&report->list, &report_enum->report_list);
  79	INIT_LIST_HEAD(&report->field_entry_list);
  80
  81	return report;
  82}
  83EXPORT_SYMBOL_GPL(hid_register_report);
  84
  85/*
  86 * Register a new field for this report.
  87 */
  88
  89static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages)
  90{
  91	struct hid_field *field;
  92
  93	if (report->maxfield == HID_MAX_FIELDS) {
  94		hid_err(report->device, "too many fields in report\n");
  95		return NULL;
  96	}
  97
  98	field = kzalloc((sizeof(struct hid_field) +
  99			 usages * sizeof(struct hid_usage) +
 100			 3 * usages * sizeof(unsigned int)), GFP_KERNEL);
 101	if (!field)
 102		return NULL;
 103
 104	field->index = report->maxfield++;
 105	report->field[field->index] = field;
 106	field->usage = (struct hid_usage *)(field + 1);
 107	field->value = (s32 *)(field->usage + usages);
 108	field->new_value = (s32 *)(field->value + usages);
 109	field->usages_priorities = (s32 *)(field->new_value + usages);
 110	field->report = report;
 111
 112	return field;
 113}
 114
 115/*
 116 * Open a collection. The type/usage is pushed on the stack.
 117 */
 118
 119static int open_collection(struct hid_parser *parser, unsigned type)
 120{
 121	struct hid_collection *collection;
 122	unsigned usage;
 123	int collection_index;
 124
 125	usage = parser->local.usage[0];
 126
 127	if (parser->collection_stack_ptr == parser->collection_stack_size) {
 128		unsigned int *collection_stack;
 129		unsigned int new_size = parser->collection_stack_size +
 130					HID_COLLECTION_STACK_SIZE;
 131
 132		collection_stack = krealloc(parser->collection_stack,
 133					    new_size * sizeof(unsigned int),
 134					    GFP_KERNEL);
 135		if (!collection_stack)
 136			return -ENOMEM;
 137
 138		parser->collection_stack = collection_stack;
 139		parser->collection_stack_size = new_size;
 140	}
 141
 142	if (parser->device->maxcollection == parser->device->collection_size) {
 143		collection = kmalloc(
 144				array3_size(sizeof(struct hid_collection),
 145					    parser->device->collection_size,
 146					    2),
 147				GFP_KERNEL);
 148		if (collection == NULL) {
 149			hid_err(parser->device, "failed to reallocate collection array\n");
 150			return -ENOMEM;
 151		}
 152		memcpy(collection, parser->device->collection,
 153			sizeof(struct hid_collection) *
 154			parser->device->collection_size);
 155		memset(collection + parser->device->collection_size, 0,
 156			sizeof(struct hid_collection) *
 157			parser->device->collection_size);
 158		kfree(parser->device->collection);
 159		parser->device->collection = collection;
 160		parser->device->collection_size *= 2;
 161	}
 162
 163	parser->collection_stack[parser->collection_stack_ptr++] =
 164		parser->device->maxcollection;
 165
 166	collection_index = parser->device->maxcollection++;
 167	collection = parser->device->collection + collection_index;
 168	collection->type = type;
 169	collection->usage = usage;
 170	collection->level = parser->collection_stack_ptr - 1;
 171	collection->parent_idx = (collection->level == 0) ? -1 :
 172		parser->collection_stack[collection->level - 1];
 173
 174	if (type == HID_COLLECTION_APPLICATION)
 175		parser->device->maxapplication++;
 176
 177	return 0;
 178}
 179
 180/*
 181 * Close a collection.
 182 */
 183
 184static int close_collection(struct hid_parser *parser)
 185{
 186	if (!parser->collection_stack_ptr) {
 187		hid_err(parser->device, "collection stack underflow\n");
 188		return -EINVAL;
 189	}
 190	parser->collection_stack_ptr--;
 191	return 0;
 192}
 193
 194/*
 195 * Climb up the stack, search for the specified collection type
 196 * and return the usage.
 197 */
 198
 199static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
 200{
 201	struct hid_collection *collection = parser->device->collection;
 202	int n;
 203
 204	for (n = parser->collection_stack_ptr - 1; n >= 0; n--) {
 205		unsigned index = parser->collection_stack[n];
 206		if (collection[index].type == type)
 207			return collection[index].usage;
 208	}
 209	return 0; /* we know nothing about this usage type */
 210}
 211
 212/*
 213 * Concatenate usage which defines 16 bits or less with the
 214 * currently defined usage page to form a 32 bit usage
 215 */
 216
 217static void complete_usage(struct hid_parser *parser, unsigned int index)
 218{
 219	parser->local.usage[index] &= 0xFFFF;
 220	parser->local.usage[index] |=
 221		(parser->global.usage_page & 0xFFFF) << 16;
 222}
 223
 224/*
 225 * Add a usage to the temporary parser table.
 226 */
 227
 228static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
 229{
 230	if (parser->local.usage_index >= HID_MAX_USAGES) {
 231		hid_err(parser->device, "usage index exceeded\n");
 232		return -1;
 233	}
 234	parser->local.usage[parser->local.usage_index] = usage;
 235
 236	/*
 237	 * If Usage item only includes usage id, concatenate it with
 238	 * currently defined usage page
 239	 */
 240	if (size <= 2)
 241		complete_usage(parser, parser->local.usage_index);
 242
 243	parser->local.usage_size[parser->local.usage_index] = size;
 244	parser->local.collection_index[parser->local.usage_index] =
 245		parser->collection_stack_ptr ?
 246		parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
 247	parser->local.usage_index++;
 248	return 0;
 249}
 250
 251/*
 252 * Register a new field for this report.
 253 */
 254
 255static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags)
 256{
 257	struct hid_report *report;
 258	struct hid_field *field;
 259	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
 260	unsigned int usages;
 261	unsigned int offset;
 262	unsigned int i;
 263	unsigned int application;
 264
 265	application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION);
 266
 267	report = hid_register_report(parser->device, report_type,
 268				     parser->global.report_id, application);
 269	if (!report) {
 270		hid_err(parser->device, "hid_register_report failed\n");
 271		return -1;
 272	}
 273
 274	/* Handle both signed and unsigned cases properly */
 275	if ((parser->global.logical_minimum < 0 &&
 276		parser->global.logical_maximum <
 277		parser->global.logical_minimum) ||
 278		(parser->global.logical_minimum >= 0 &&
 279		(__u32)parser->global.logical_maximum <
 280		(__u32)parser->global.logical_minimum)) {
 281		dbg_hid("logical range invalid 0x%x 0x%x\n",
 282			parser->global.logical_minimum,
 283			parser->global.logical_maximum);
 284		return -1;
 285	}
 286
 287	offset = report->size;
 288	report->size += parser->global.report_size * parser->global.report_count;
 289
 290	if (parser->device->ll_driver->max_buffer_size)
 291		max_buffer_size = parser->device->ll_driver->max_buffer_size;
 292
 293	/* Total size check: Allow for possible report index byte */
 294	if (report->size > (max_buffer_size - 1) << 3) {
 295		hid_err(parser->device, "report is too long\n");
 296		return -1;
 297	}
 298
 299	if (!parser->local.usage_index) /* Ignore padding fields */
 300		return 0;
 301
 302	usages = max_t(unsigned, parser->local.usage_index,
 303				 parser->global.report_count);
 304
 305	field = hid_register_field(report, usages);
 306	if (!field)
 307		return 0;
 308
 309	field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL);
 310	field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL);
 311	field->application = application;
 312
 313	for (i = 0; i < usages; i++) {
 314		unsigned j = i;
 315		/* Duplicate the last usage we parsed if we have excess values */
 316		if (i >= parser->local.usage_index)
 317			j = parser->local.usage_index - 1;
 318		field->usage[i].hid = parser->local.usage[j];
 319		field->usage[i].collection_index =
 320			parser->local.collection_index[j];
 321		field->usage[i].usage_index = i;
 322		field->usage[i].resolution_multiplier = 1;
 323	}
 324
 325	field->maxusage = usages;
 326	field->flags = flags;
 327	field->report_offset = offset;
 328	field->report_type = report_type;
 329	field->report_size = parser->global.report_size;
 330	field->report_count = parser->global.report_count;
 331	field->logical_minimum = parser->global.logical_minimum;
 332	field->logical_maximum = parser->global.logical_maximum;
 333	field->physical_minimum = parser->global.physical_minimum;
 334	field->physical_maximum = parser->global.physical_maximum;
 335	field->unit_exponent = parser->global.unit_exponent;
 336	field->unit = parser->global.unit;
 337
 338	return 0;
 339}
 340
 341/*
 342 * Read data value from item.
 343 */
 344
 345static u32 item_udata(struct hid_item *item)
 346{
 347	switch (item->size) {
 348	case 1: return item->data.u8;
 349	case 2: return item->data.u16;
 350	case 4: return item->data.u32;
 351	}
 352	return 0;
 353}
 354
 355static s32 item_sdata(struct hid_item *item)
 356{
 357	switch (item->size) {
 358	case 1: return item->data.s8;
 359	case 2: return item->data.s16;
 360	case 4: return item->data.s32;
 361	}
 362	return 0;
 363}
 364
 365/*
 366 * Process a global item.
 367 */
 368
 369static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
 370{
 371	__s32 raw_value;
 372	switch (item->tag) {
 373	case HID_GLOBAL_ITEM_TAG_PUSH:
 374
 375		if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
 376			hid_err(parser->device, "global environment stack overflow\n");
 377			return -1;
 378		}
 379
 380		memcpy(parser->global_stack + parser->global_stack_ptr++,
 381			&parser->global, sizeof(struct hid_global));
 382		return 0;
 383
 384	case HID_GLOBAL_ITEM_TAG_POP:
 385
 386		if (!parser->global_stack_ptr) {
 387			hid_err(parser->device, "global environment stack underflow\n");
 388			return -1;
 389		}
 390
 391		memcpy(&parser->global, parser->global_stack +
 392			--parser->global_stack_ptr, sizeof(struct hid_global));
 393		return 0;
 394
 395	case HID_GLOBAL_ITEM_TAG_USAGE_PAGE:
 396		parser->global.usage_page = item_udata(item);
 397		return 0;
 398
 399	case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM:
 400		parser->global.logical_minimum = item_sdata(item);
 401		return 0;
 402
 403	case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM:
 404		if (parser->global.logical_minimum < 0)
 405			parser->global.logical_maximum = item_sdata(item);
 406		else
 407			parser->global.logical_maximum = item_udata(item);
 408		return 0;
 409
 410	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM:
 411		parser->global.physical_minimum = item_sdata(item);
 412		return 0;
 413
 414	case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM:
 415		if (parser->global.physical_minimum < 0)
 416			parser->global.physical_maximum = item_sdata(item);
 417		else
 418			parser->global.physical_maximum = item_udata(item);
 419		return 0;
 420
 421	case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT:
 422		/* Many devices provide unit exponent as a two's complement
 423		 * nibble due to the common misunderstanding of HID
 424		 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle
 425		 * both this and the standard encoding. */
 426		raw_value = item_sdata(item);
 427		if (!(raw_value & 0xfffffff0))
 428			parser->global.unit_exponent = hid_snto32(raw_value, 4);
 429		else
 430			parser->global.unit_exponent = raw_value;
 431		return 0;
 432
 433	case HID_GLOBAL_ITEM_TAG_UNIT:
 434		parser->global.unit = item_udata(item);
 435		return 0;
 436
 437	case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
 438		parser->global.report_size = item_udata(item);
 439		if (parser->global.report_size > 256) {
 440			hid_err(parser->device, "invalid report_size %d\n",
 441					parser->global.report_size);
 442			return -1;
 443		}
 444		return 0;
 445
 446	case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
 447		parser->global.report_count = item_udata(item);
 448		if (parser->global.report_count > HID_MAX_USAGES) {
 449			hid_err(parser->device, "invalid report_count %d\n",
 450					parser->global.report_count);
 451			return -1;
 452		}
 453		return 0;
 454
 455	case HID_GLOBAL_ITEM_TAG_REPORT_ID:
 456		parser->global.report_id = item_udata(item);
 457		if (parser->global.report_id == 0 ||
 458		    parser->global.report_id >= HID_MAX_IDS) {
 459			hid_err(parser->device, "report_id %u is invalid\n",
 460				parser->global.report_id);
 461			return -1;
 462		}
 463		return 0;
 464
 465	default:
 466		hid_err(parser->device, "unknown global tag 0x%x\n", item->tag);
 467		return -1;
 468	}
 469}
 470
 471/*
 472 * Process a local item.
 473 */
 474
 475static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
 476{
 477	__u32 data;
 478	unsigned n;
 479	__u32 count;
 480
 481	data = item_udata(item);
 482
 483	switch (item->tag) {
 484	case HID_LOCAL_ITEM_TAG_DELIMITER:
 485
 486		if (data) {
 487			/*
 488			 * We treat items before the first delimiter
 489			 * as global to all usage sets (branch 0).
 490			 * In the moment we process only these global
 491			 * items and the first delimiter set.
 492			 */
 493			if (parser->local.delimiter_depth != 0) {
 494				hid_err(parser->device, "nested delimiters\n");
 495				return -1;
 496			}
 497			parser->local.delimiter_depth++;
 498			parser->local.delimiter_branch++;
 499		} else {
 500			if (parser->local.delimiter_depth < 1) {
 501				hid_err(parser->device, "bogus close delimiter\n");
 502				return -1;
 503			}
 504			parser->local.delimiter_depth--;
 505		}
 506		return 0;
 507
 508	case HID_LOCAL_ITEM_TAG_USAGE:
 509
 510		if (parser->local.delimiter_branch > 1) {
 511			dbg_hid("alternative usage ignored\n");
 512			return 0;
 513		}
 514
 515		return hid_add_usage(parser, data, item->size);
 516
 517	case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
 518
 519		if (parser->local.delimiter_branch > 1) {
 520			dbg_hid("alternative usage ignored\n");
 521			return 0;
 522		}
 523
 524		parser->local.usage_minimum = data;
 525		return 0;
 526
 527	case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
 528
 529		if (parser->local.delimiter_branch > 1) {
 530			dbg_hid("alternative usage ignored\n");
 531			return 0;
 532		}
 533
 534		count = data - parser->local.usage_minimum;
 535		if (count + parser->local.usage_index >= HID_MAX_USAGES) {
 536			/*
 537			 * We do not warn if the name is not set, we are
 538			 * actually pre-scanning the device.
 539			 */
 540			if (dev_name(&parser->device->dev))
 541				hid_warn(parser->device,
 542					 "ignoring exceeding usage max\n");
 543			data = HID_MAX_USAGES - parser->local.usage_index +
 544				parser->local.usage_minimum - 1;
 545			if (data <= 0) {
 546				hid_err(parser->device,
 547					"no more usage index available\n");
 548				return -1;
 549			}
 550		}
 551
 552		for (n = parser->local.usage_minimum; n <= data; n++)
 553			if (hid_add_usage(parser, n, item->size)) {
 554				dbg_hid("hid_add_usage failed\n");
 555				return -1;
 556			}
 557		return 0;
 558
 559	default:
 560
 561		dbg_hid("unknown local item tag 0x%x\n", item->tag);
 562		return 0;
 563	}
 564	return 0;
 565}
 566
 567/*
 568 * Concatenate Usage Pages into Usages where relevant:
 569 * As per specification, 6.2.2.8: "When the parser encounters a main item it
 570 * concatenates the last declared Usage Page with a Usage to form a complete
 571 * usage value."
 572 */
 573
 574static void hid_concatenate_last_usage_page(struct hid_parser *parser)
 575{
 576	int i;
 577	unsigned int usage_page;
 578	unsigned int current_page;
 579
 580	if (!parser->local.usage_index)
 581		return;
 582
 583	usage_page = parser->global.usage_page;
 584
 585	/*
 586	 * Concatenate usage page again only if last declared Usage Page
 587	 * has not been already used in previous usages concatenation
 588	 */
 589	for (i = parser->local.usage_index - 1; i >= 0; i--) {
 590		if (parser->local.usage_size[i] > 2)
 591			/* Ignore extended usages */
 592			continue;
 593
 594		current_page = parser->local.usage[i] >> 16;
 595		if (current_page == usage_page)
 596			break;
 597
 598		complete_usage(parser, i);
 599	}
 600}
 601
 602/*
 603 * Process a main item.
 604 */
 605
 606static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
 607{
 608	__u32 data;
 609	int ret;
 610
 611	hid_concatenate_last_usage_page(parser);
 612
 613	data = item_udata(item);
 614
 615	switch (item->tag) {
 616	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
 617		ret = open_collection(parser, data & 0xff);
 618		break;
 619	case HID_MAIN_ITEM_TAG_END_COLLECTION:
 620		ret = close_collection(parser);
 621		break;
 622	case HID_MAIN_ITEM_TAG_INPUT:
 623		ret = hid_add_field(parser, HID_INPUT_REPORT, data);
 624		break;
 625	case HID_MAIN_ITEM_TAG_OUTPUT:
 626		ret = hid_add_field(parser, HID_OUTPUT_REPORT, data);
 627		break;
 628	case HID_MAIN_ITEM_TAG_FEATURE:
 629		ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
 630		break;
 631	default:
 632		hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag);
 633		ret = 0;
 634	}
 635
 636	memset(&parser->local, 0, sizeof(parser->local));	/* Reset the local parser environment */
 637
 638	return ret;
 639}
 640
 641/*
 642 * Process a reserved item.
 643 */
 644
 645static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
 646{
 647	dbg_hid("reserved item type, tag 0x%x\n", item->tag);
 648	return 0;
 649}
 650
 651/*
 652 * Free a report and all registered fields. The field->usage and
 653 * field->value table's are allocated behind the field, so we need
 654 * only to free(field) itself.
 655 */
 656
 657static void hid_free_report(struct hid_report *report)
 658{
 659	unsigned n;
 660
 661	kfree(report->field_entries);
 662
 663	for (n = 0; n < report->maxfield; n++)
 664		kfree(report->field[n]);
 665	kfree(report);
 666}
 667
 668/*
 669 * Close report. This function returns the device
 670 * state to the point prior to hid_open_report().
 671 */
 672static void hid_close_report(struct hid_device *device)
 673{
 674	unsigned i, j;
 675
 676	for (i = 0; i < HID_REPORT_TYPES; i++) {
 677		struct hid_report_enum *report_enum = device->report_enum + i;
 678
 679		for (j = 0; j < HID_MAX_IDS; j++) {
 680			struct hid_report *report = report_enum->report_id_hash[j];
 681			if (report)
 682				hid_free_report(report);
 683		}
 684		memset(report_enum, 0, sizeof(*report_enum));
 685		INIT_LIST_HEAD(&report_enum->report_list);
 686	}
 687
 688	kfree(device->rdesc);
 
 
 
 
 
 
 
 689	device->rdesc = NULL;
 690	device->rsize = 0;
 691
 692	kfree(device->collection);
 693	device->collection = NULL;
 694	device->collection_size = 0;
 695	device->maxcollection = 0;
 696	device->maxapplication = 0;
 697
 698	device->status &= ~HID_STAT_PARSED;
 699}
 700
 
 
 
 
 
 
 
 
 701/*
 702 * Free a device structure, all reports, and all fields.
 703 */
 704
 705void hiddev_free(struct kref *ref)
 706{
 707	struct hid_device *hid = container_of(ref, struct hid_device, ref);
 708
 709	hid_close_report(hid);
 
 710	kfree(hid->dev_rdesc);
 711	kfree(hid);
 712}
 713
 714static void hid_device_release(struct device *dev)
 715{
 716	struct hid_device *hid = to_hid_device(dev);
 717
 718	kref_put(&hid->ref, hiddev_free);
 719}
 720
 721/*
 722 * Fetch a report description item from the data stream. We support long
 723 * items, though they are not used yet.
 724 */
 725
 726static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item)
 727{
 728	u8 b;
 729
 730	if ((end - start) <= 0)
 731		return NULL;
 732
 733	b = *start++;
 734
 735	item->type = (b >> 2) & 3;
 736	item->tag  = (b >> 4) & 15;
 737
 738	if (item->tag == HID_ITEM_TAG_LONG) {
 739
 740		item->format = HID_ITEM_FORMAT_LONG;
 741
 742		if ((end - start) < 2)
 743			return NULL;
 744
 745		item->size = *start++;
 746		item->tag  = *start++;
 747
 748		if ((end - start) < item->size)
 749			return NULL;
 750
 751		item->data.longdata = start;
 752		start += item->size;
 753		return start;
 754	}
 755
 756	item->format = HID_ITEM_FORMAT_SHORT;
 757	item->size = b & 3;
 758
 759	switch (item->size) {
 760	case 0:
 761		return start;
 762
 763	case 1:
 764		if ((end - start) < 1)
 765			return NULL;
 766		item->data.u8 = *start++;
 767		return start;
 768
 769	case 2:
 770		if ((end - start) < 2)
 771			return NULL;
 772		item->data.u16 = get_unaligned_le16(start);
 773		start = (__u8 *)((__le16 *)start + 1);
 774		return start;
 775
 776	case 3:
 777		item->size++;
 778		if ((end - start) < 4)
 779			return NULL;
 780		item->data.u32 = get_unaligned_le32(start);
 781		start = (__u8 *)((__le32 *)start + 1);
 782		return start;
 783	}
 784
 785	return NULL;
 786}
 787
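/*
 * Example (illustrative bytes, not from the original source): the short
 * item encoding 0x05 0x01 decodes above as size = 1, type = 1 (global),
 * tag = 0 and data.u8 = 0x01, i.e. the common "Usage Page (Generic
 * Desktop)" entry found at the start of many report descriptors.
 */
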
 788static void hid_scan_input_usage(struct hid_parser *parser, u32 usage)
 789{
 790	struct hid_device *hid = parser->device;
 791
 792	if (usage == HID_DG_CONTACTID)
 793		hid->group = HID_GROUP_MULTITOUCH;
 794}
 795
 796static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage)
 797{
 798	if (usage == 0xff0000c5 && parser->global.report_count == 256 &&
 799	    parser->global.report_size == 8)
 800		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
 801
 802	if (usage == 0xff0000c6 && parser->global.report_count == 1 &&
 803	    parser->global.report_size == 8)
 804		parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8;
 805}
 806
 807static void hid_scan_collection(struct hid_parser *parser, unsigned type)
 808{
 809	struct hid_device *hid = parser->device;
 810	int i;
 811
 812	if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
 813	    (type == HID_COLLECTION_PHYSICAL ||
 814	     type == HID_COLLECTION_APPLICATION))
 815		hid->group = HID_GROUP_SENSOR_HUB;
 816
 817	if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
 818	    hid->product == USB_DEVICE_ID_MS_POWER_COVER &&
 819	    hid->group == HID_GROUP_MULTITOUCH)
 820		hid->group = HID_GROUP_GENERIC;
 821
 822	if ((parser->global.usage_page << 16) == HID_UP_GENDESK)
 823		for (i = 0; i < parser->local.usage_index; i++)
 824			if (parser->local.usage[i] == HID_GD_POINTER)
 825				parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER;
 826
 827	if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR)
 828		parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC;
 829
 830	if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR)
 831		for (i = 0; i < parser->local.usage_index; i++)
 832			if (parser->local.usage[i] ==
 833					(HID_UP_GOOGLEVENDOR | 0x0001))
 834				parser->device->group =
 835					HID_GROUP_VIVALDI;
 836}
 837
 838static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
 839{
 840	__u32 data;
 841	int i;
 842
 843	hid_concatenate_last_usage_page(parser);
 844
 845	data = item_udata(item);
 846
 847	switch (item->tag) {
 848	case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION:
 849		hid_scan_collection(parser, data & 0xff);
 850		break;
 851	case HID_MAIN_ITEM_TAG_END_COLLECTION:
 852		break;
 853	case HID_MAIN_ITEM_TAG_INPUT:
 854		/* ignore constant inputs, they will be ignored by hid-input */
 855		if (data & HID_MAIN_ITEM_CONSTANT)
 856			break;
 857		for (i = 0; i < parser->local.usage_index; i++)
 858			hid_scan_input_usage(parser, parser->local.usage[i]);
 859		break;
 860	case HID_MAIN_ITEM_TAG_OUTPUT:
 861		break;
 862	case HID_MAIN_ITEM_TAG_FEATURE:
 863		for (i = 0; i < parser->local.usage_index; i++)
 864			hid_scan_feature_usage(parser, parser->local.usage[i]);
 865		break;
 866	}
 867
 868	/* Reset the local parser environment */
 869	memset(&parser->local, 0, sizeof(parser->local));
 870
 871	return 0;
 872}
 873
 874/*
 875 * Scan a report descriptor before the device is added to the bus.
 876 * Sets device groups and other properties that determine what driver
 877 * to load.
 878 */
 879static int hid_scan_report(struct hid_device *hid)
 880{
 881	struct hid_parser *parser;
 882	struct hid_item item;
 883	__u8 *start = hid->dev_rdesc;
 884	__u8 *end = start + hid->dev_rsize;
 885	static int (*dispatch_type[])(struct hid_parser *parser,
 886				      struct hid_item *item) = {
 887		hid_scan_main,
 888		hid_parser_global,
 889		hid_parser_local,
 890		hid_parser_reserved
 891	};
 892
 893	parser = vzalloc(sizeof(struct hid_parser));
 894	if (!parser)
 895		return -ENOMEM;
 896
 897	parser->device = hid;
 898	hid->group = HID_GROUP_GENERIC;
 899
 900	/*
 901	 * The parsing is simpler than the one in hid_open_report() as we should
 902	 * be robust against hid errors. Those errors will be raised by
 903	 * hid_open_report() anyway.
 904	 */
 905	while ((start = fetch_item(start, end, &item)) != NULL)
 906		dispatch_type[item.type](parser, &item);
 907
 908	/*
 909	 * Handle special flags set during scanning.
 910	 */
 911	if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) &&
 912	    (hid->group == HID_GROUP_MULTITOUCH))
 913		hid->group = HID_GROUP_MULTITOUCH_WIN_8;
 914
 915	/*
 916	 * Vendor specific handling
 917	 */
 918	switch (hid->vendor) {
 919	case USB_VENDOR_ID_WACOM:
 920		hid->group = HID_GROUP_WACOM;
 921		break;
 922	case USB_VENDOR_ID_SYNAPTICS:
 923		if (hid->group == HID_GROUP_GENERIC)
 924			if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC)
 925			    && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER))
 926				/*
 927				 * hid-rmi should take care of them,
 928				 * not hid-generic
 929				 */
 930				hid->group = HID_GROUP_RMI;
 931		break;
 932	}
 933
 934	kfree(parser->collection_stack);
 935	vfree(parser);
 936	return 0;
 937}
 938
 939/**
 940 * hid_parse_report - parse device report
 941 *
 942 * @hid: hid device
 943 * @start: report start
 944 * @size: report size
 945 *
 946 * Allocate the device report as read by the bus driver. This function should
 947 * only be called from parse() in ll drivers.
 948 */
 949int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size)
 950{
 951	hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL);
 952	if (!hid->dev_rdesc)
 953		return -ENOMEM;
 954	hid->dev_rsize = size;
 955	return 0;
 956}
 957EXPORT_SYMBOL_GPL(hid_parse_report);
 958
 959static const char * const hid_report_names[] = {
 960	"HID_INPUT_REPORT",
 961	"HID_OUTPUT_REPORT",
 962	"HID_FEATURE_REPORT",
 963};
 964/**
 965 * hid_validate_values - validate existing device report's value indexes
 966 *
 967 * @hid: hid device
 968 * @type: which report type to examine
 969 * @id: which report ID to examine (0 for first)
 970 * @field_index: which report field to examine
 971 * @report_counts: expected number of values
 972 *
 973 * Validate the number of values in a given field of a given report, after
 974 * parsing.
 975 */
 976struct hid_report *hid_validate_values(struct hid_device *hid,
 977				       enum hid_report_type type, unsigned int id,
 978				       unsigned int field_index,
 979				       unsigned int report_counts)
 980{
 981	struct hid_report *report;
 982
 983	if (type > HID_FEATURE_REPORT) {
 984		hid_err(hid, "invalid HID report type %u\n", type);
 985		return NULL;
 986	}
 987
 988	if (id >= HID_MAX_IDS) {
 989		hid_err(hid, "invalid HID report id %u\n", id);
 990		return NULL;
 991	}
 992
 993	/*
 994	 * Explicitly not using hid_get_report() here since it depends on
 995	 * ->numbered being checked, which may not always be the case when
 996	 * drivers go to access report values.
 997	 */
 998	if (id == 0) {
 999		/*
1000		 * Validating on id 0 means we should examine the first
1001		 * report in the list.
1002		 */
1003		report = list_first_entry_or_null(
1004				&hid->report_enum[type].report_list,
1005				struct hid_report, list);
1006	} else {
1007		report = hid->report_enum[type].report_id_hash[id];
1008	}
1009	if (!report) {
1010		hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
1011		return NULL;
1012	}
1013	if (report->maxfield <= field_index) {
1014		hid_err(hid, "not enough fields in %s %u\n",
1015			hid_report_names[type], id);
1016		return NULL;
1017	}
1018	if (report->field[field_index]->report_count < report_counts) {
1019		hid_err(hid, "not enough values in %s %u field %u\n",
1020			hid_report_names[type], id, field_index);
1021		return NULL;
1022	}
1023	return report;
1024}
1025EXPORT_SYMBOL_GPL(hid_validate_values);
1026
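/*
 * Usage sketch (hypothetical report layout, not from the original
 * source): a force feedback driver that needs four writable values in
 * the first output report can guard its probe path with
 *
 *	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 4);
 *	if (!report)
 *		return -ENODEV;
 *
 * and only then dereference report->field[0]->value[0..3].
 */
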
1027static int hid_calculate_multiplier(struct hid_device *hid,
1028				     struct hid_field *multiplier)
1029{
1030	int m;
1031	__s32 v = *multiplier->value;
1032	__s32 lmin = multiplier->logical_minimum;
1033	__s32 lmax = multiplier->logical_maximum;
1034	__s32 pmin = multiplier->physical_minimum;
1035	__s32 pmax = multiplier->physical_maximum;
1036
1037	/*
1038	 * "Because OS implementations will generally divide the control's
1039	 * reported count by the Effective Resolution Multiplier, designers
1040	 * should take care not to establish a potential Effective
1041	 * Resolution Multiplier of zero."
1042	 * HID Usage Table, v1.12, Section 4.3.1, p31
1043	 */
1044	if (lmax - lmin == 0)
1045		return 1;
1046	/*
1047	 * Handling the unit exponent is left as an exercise to whoever
1048	 * finds a device where that exponent is not 0.
1049	 */
1050	m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin);
1051	if (unlikely(multiplier->unit_exponent != 0)) {
1052		hid_warn(hid,
1053			 "unsupported Resolution Multiplier unit exponent %d\n",
1054			 multiplier->unit_exponent);
1055	}
1056
1057	/* There are no devices with an effective multiplier > 255 */
1058	if (unlikely(m == 0 || m > 255 || m < -255)) {
1059		hid_warn(hid, "unsupported Resolution Multiplier %d\n", m);
1060		m = 1;
1061	}
1062
1063	return m;
1064}
1065
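/*
 * Worked example (illustrative values): a high resolution wheel that
 * declares Logical Minimum 0, Logical Maximum 1, Physical Minimum 1 and
 * Physical Maximum 8 yields
 *	m = ((1 - 0)/(1 - 0) * (8 - 1) + 1) = 8
 * when the reported multiplier value is 1, and
 *	m = ((0 - 0)/(1 - 0) * (8 - 1) + 1) = 1
 * when it is 0.
 */
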
1066static void hid_apply_multiplier_to_field(struct hid_device *hid,
1067					  struct hid_field *field,
1068					  struct hid_collection *multiplier_collection,
1069					  int effective_multiplier)
1070{
1071	struct hid_collection *collection;
1072	struct hid_usage *usage;
1073	int i;
1074
1075	/*
1076	 * If multiplier_collection is NULL, the multiplier applies
1077	 * to all fields in the report.
1078	 * Otherwise, it is the Logical Collection the multiplier applies to
1079	 * but our field may be in a subcollection of that collection.
1080	 */
1081	for (i = 0; i < field->maxusage; i++) {
1082		usage = &field->usage[i];
1083
1084		collection = &hid->collection[usage->collection_index];
1085		while (collection->parent_idx != -1 &&
1086		       collection != multiplier_collection)
1087			collection = &hid->collection[collection->parent_idx];
1088
1089		if (collection->parent_idx != -1 ||
1090		    multiplier_collection == NULL)
1091			usage->resolution_multiplier = effective_multiplier;
1092
1093	}
1094}
1095
1096static void hid_apply_multiplier(struct hid_device *hid,
1097				 struct hid_field *multiplier)
1098{
1099	struct hid_report_enum *rep_enum;
1100	struct hid_report *rep;
1101	struct hid_field *field;
1102	struct hid_collection *multiplier_collection;
1103	int effective_multiplier;
1104	int i;
1105
1106	/*
1107	 * "The Resolution Multiplier control must be contained in the same
1108	 * Logical Collection as the control(s) to which it is to be applied.
1109	 * If no Resolution Multiplier is defined, then the Resolution
1110	 * Multiplier defaults to 1.  If more than one control exists in a
1111	 * Logical Collection, the Resolution Multiplier is associated with
1112	 * all controls in the collection. If no Logical Collection is
1113	 * defined, the Resolution Multiplier is associated with all
1114	 * controls in the report."
1115	 * HID Usage Table, v1.12, Section 4.3.1, p30
1116	 *
1117	 * Thus, search from the current collection upwards until we find a
1118	 * logical collection. Then search all fields for that same parent
1119	 * collection. Those are the fields the multiplier applies to.
1120	 *
1121	 * If we have more than one multiplier, it will overwrite the
1122	 * applicable fields later.
1123	 */
1124	multiplier_collection = &hid->collection[multiplier->usage->collection_index];
1125	while (multiplier_collection->parent_idx != -1 &&
1126	       multiplier_collection->type != HID_COLLECTION_LOGICAL)
1127		multiplier_collection = &hid->collection[multiplier_collection->parent_idx];
1128
1129	effective_multiplier = hid_calculate_multiplier(hid, multiplier);
1130
1131	rep_enum = &hid->report_enum[HID_INPUT_REPORT];
1132	list_for_each_entry(rep, &rep_enum->report_list, list) {
1133		for (i = 0; i < rep->maxfield; i++) {
1134			field = rep->field[i];
1135			hid_apply_multiplier_to_field(hid, field,
1136						      multiplier_collection,
1137						      effective_multiplier);
1138		}
1139	}
1140}
1141
1142/*
1143 * hid_setup_resolution_multiplier - set up all resolution multipliers
1144 *
1145 * @hid: hid device
1146 *
1147 * Search for all Resolution Multiplier Feature Reports and apply their
1148 * value to all matching Input items. This only updates the internal struct
1149 * fields.
1150 *
1151 * The Resolution Multiplier is applied by the hardware. If the multiplier
1152 * is anything other than 1, the hardware will send pre-multiplied events
1153 * so that the same physical interaction generates an accumulated
1154 *	accumulated_value = value * multiplier
1155 * This may be achieved by sending
1156 * - "value * multiplier" for each event, or
1157 * - "value" but "multiplier" times as frequently, or
1158 * - a combination of the above
1159 * The only guarantee is that the same physical interaction always generates
1160 * an accumulated 'value * multiplier'.
1161 *
1162 * This function must be called before any event processing and after
1163 * any SetRequest to the Resolution Multiplier.
1164 */
1165void hid_setup_resolution_multiplier(struct hid_device *hid)
1166{
1167	struct hid_report_enum *rep_enum;
1168	struct hid_report *rep;
1169	struct hid_usage *usage;
1170	int i, j;
1171
1172	rep_enum = &hid->report_enum[HID_FEATURE_REPORT];
1173	list_for_each_entry(rep, &rep_enum->report_list, list) {
1174		for (i = 0; i < rep->maxfield; i++) {
1175			/* Ignore if report count is out of bounds. */
1176			if (rep->field[i]->report_count < 1)
1177				continue;
1178
1179			for (j = 0; j < rep->field[i]->maxusage; j++) {
1180				usage = &rep->field[i]->usage[j];
1181				if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER)
1182					hid_apply_multiplier(hid,
1183							     rep->field[i]);
1184			}
1185		}
1186	}
1187}
1188EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier);
1189
1190/**
1191 * hid_open_report - open a driver-specific device report
1192 *
1193 * @device: hid device
1194 *
1195 * Parse a report description into a hid_device structure. Reports are
1196 * enumerated, fields are attached to these reports.
1197 * 0 returned on success, otherwise nonzero error value.
1198 *
1199 * This function (or the equivalent hid_parse() macro) should only be
1200 * called from probe() in drivers, before starting the device.
1201 */
1202int hid_open_report(struct hid_device *device)
1203{
1204	struct hid_parser *parser;
1205	struct hid_item item;
1206	unsigned int size;
1207	__u8 *start;
1208	__u8 *buf;
1209	__u8 *end;
1210	__u8 *next;
1211	int ret;
1212	int i;
1213	static int (*dispatch_type[])(struct hid_parser *parser,
1214				      struct hid_item *item) = {
1215		hid_parser_main,
1216		hid_parser_global,
1217		hid_parser_local,
1218		hid_parser_reserved
1219	};
1220
1221	if (WARN_ON(device->status & HID_STAT_PARSED))
1222		return -EBUSY;
1223
1224	start = device->dev_rdesc;
1225	if (WARN_ON(!start))
1226		return -ENODEV;
1227	size = device->dev_rsize;
1228
1229	/* call_hid_bpf_rdesc_fixup() ensures we work on a copy of rdesc */
1230	buf = call_hid_bpf_rdesc_fixup(device, start, &size);
1231	if (buf == NULL)
1232		return -ENOMEM;
1233
1234	if (device->driver->report_fixup)
1235		start = device->driver->report_fixup(device, buf, &size);
1236	else
1237		start = buf;
1238
1239	start = kmemdup(start, size, GFP_KERNEL);
1240	kfree(buf);
1241	if (start == NULL)
1242		return -ENOMEM;
1243
1244	device->rdesc = start;
1245	device->rsize = size;
1246
1247	parser = vzalloc(sizeof(struct hid_parser));
1248	if (!parser) {
1249		ret = -ENOMEM;
1250		goto alloc_err;
1251	}
1252
1253	parser->device = device;
1254
1255	end = start + size;
1256
1257	device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS,
1258				     sizeof(struct hid_collection), GFP_KERNEL);
1259	if (!device->collection) {
1260		ret = -ENOMEM;
1261		goto err;
1262	}
1263	device->collection_size = HID_DEFAULT_NUM_COLLECTIONS;
1264	for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++)
1265		device->collection[i].parent_idx = -1;
1266
1267	ret = -EINVAL;
1268	while ((next = fetch_item(start, end, &item)) != NULL) {
1269		start = next;
1270
1271		if (item.format != HID_ITEM_FORMAT_SHORT) {
1272			hid_err(device, "unexpected long global item\n");
1273			goto err;
1274		}
1275
1276		if (dispatch_type[item.type](parser, &item)) {
1277			hid_err(device, "item %u %u %u %u parsing failed\n",
1278				item.format, (unsigned)item.size,
1279				(unsigned)item.type, (unsigned)item.tag);
1280			goto err;
1281		}
1282
1283		if (start == end) {
1284			if (parser->collection_stack_ptr) {
1285				hid_err(device, "unbalanced collection at end of report description\n");
1286				goto err;
1287			}
1288			if (parser->local.delimiter_depth) {
1289				hid_err(device, "unbalanced delimiter at end of report description\n");
1290				goto err;
1291			}
1292
1293			/*
1294			 * fetch initial values in case the device's
1295			 * default multiplier isn't the recommended 1
1296			 */
1297			hid_setup_resolution_multiplier(device);
1298
1299			kfree(parser->collection_stack);
1300			vfree(parser);
1301			device->status |= HID_STAT_PARSED;
1302
1303			return 0;
1304		}
1305	}
1306
1307	hid_err(device, "item fetching failed at offset %u/%u\n",
1308		size - (unsigned int)(end - start), size);
1309err:
1310	kfree(parser->collection_stack);
1311alloc_err:
1312	vfree(parser);
1313	hid_close_report(device);
1314	return ret;
1315}
1316EXPORT_SYMBOL_GPL(hid_open_report);
1317
1340s32 hid_snto32(__u32 value, unsigned n)
1341{
1342	return snto32(value, n);
1343}
1344EXPORT_SYMBOL_GPL(hid_snto32);
1345
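/*
 * Worked examples (illustrative values) for the signed conversions used
 * when extracting and implementing report fields:
 *	snto32(0xff, 8)  == -1      an 8-bit 0xff sign-extends to -1
 *	snto32(0x1f, 5)  == -1      bit 4 is the sign bit of a 5-bit field
 *	s32ton(-1, 5)    == 0x1f    round-trips back to the 5-bit pattern
 *	s32ton(100, 5)   == 0x0f    out-of-range values clamp to the n-bit max
 */
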
1358/*
1359 * Extract/implement a data field from/to a little endian report (bit array).
1360 *
1361 * Code sort-of follows HID spec:
1362 *     http://www.usb.org/developers/hidpage/HID1_11.pdf
1363 *
1364 * While the USB HID spec allows unlimited length bit fields in "report
1365 * descriptors", most devices never use more than 16 bits.
1366 * One model of UPS is claimed to report "LINEV" as a 32-bit field.
1367 * Search linux-kernel and linux-usb-devel archives for "hid-core extract".
1368 */
1369
1370static u32 __extract(u8 *report, unsigned offset, int n)
1371{
1372	unsigned int idx = offset / 8;
1373	unsigned int bit_nr = 0;
1374	unsigned int bit_shift = offset % 8;
1375	int bits_to_copy = 8 - bit_shift;
1376	u32 value = 0;
1377	u32 mask = n < 32 ? (1U << n) - 1 : ~0U;
1378
1379	while (n > 0) {
1380		value |= ((u32)report[idx] >> bit_shift) << bit_nr;
1381		n -= bits_to_copy;
1382		bit_nr += bits_to_copy;
1383		bits_to_copy = 8;
1384		bit_shift = 0;
1385		idx++;
1386	}
1387
1388	return value & mask;
1389}
1390
1391u32 hid_field_extract(const struct hid_device *hid, u8 *report,
1392			unsigned offset, unsigned n)
1393{
1394	if (n > 32) {
1395		hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n",
1396			      __func__, n, current->comm);
1397		n = 32;
1398	}
1399
1400	return __extract(report, offset, n);
1401}
1402EXPORT_SYMBOL_GPL(hid_field_extract);
1403
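/*
 * Worked example (illustrative values): with report bytes {0xab, 0xcd},
 * hid_field_extract(hid, report, 4, 8) collects bits 4..11 of the little
 * endian bit stream, i.e. the high nibble of byte 0 followed by the low
 * nibble of byte 1, and returns 0xda.
 */
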
1404/*
1405 * "implement" : set bits in a little endian bit stream.
1406 * Same concepts as "extract" (see comments above).
1407 * The data mangled in the bit stream remains in little endian
1408 * order the whole time. It makes more sense to talk about
1409 * endianness of register values by considering a register
1410 * a "cached" copy of the little endian bit stream.
1411 */
1412
1413static void __implement(u8 *report, unsigned offset, int n, u32 value)
1414{
1415	unsigned int idx = offset / 8;
1416	unsigned int bit_shift = offset % 8;
1417	int bits_to_set = 8 - bit_shift;
1418
1419	while (n - bits_to_set >= 0) {
1420		report[idx] &= ~(0xff << bit_shift);
1421		report[idx] |= value << bit_shift;
1422		value >>= bits_to_set;
1423		n -= bits_to_set;
1424		bits_to_set = 8;
1425		bit_shift = 0;
1426		idx++;
1427	}
1428
1429	/* last nibble */
1430	if (n) {
1431		u8 bit_mask = ((1U << n) - 1);
1432		report[idx] &= ~(bit_mask << bit_shift);
1433		report[idx] |= value << bit_shift;
1434	}
1435}
1436
1437static void implement(const struct hid_device *hid, u8 *report,
1438		      unsigned offset, unsigned n, u32 value)
1439{
1440	if (unlikely(n > 32)) {
1441		hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n",
1442			 __func__, n, current->comm);
1443		n = 32;
1444	} else if (n < 32) {
1445		u32 m = (1U << n) - 1;
1446
1447		if (unlikely(value > m)) {
1448			hid_warn(hid,
1449				 "%s() called with too large value %d (n: %d)! (%s)\n",
1450				 __func__, value, n, current->comm);
1451			WARN_ON(1);
1452			value &= m;
1453		}
1454	}
1455
1456	__implement(report, offset, n, value);
1457}
1458
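/*
 * Worked example (illustrative values): starting from report bytes
 * {0x0b, 0xc0}, implement(hid, report, 4, 8, 0xda) rewrites only bits
 * 4..11 and leaves {0xab, 0xcd}, the exact inverse of the extraction
 * example above.
 */
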
1459/*
1460 * Search an array for a value.
1461 */
1462
1463static int search(__s32 *array, __s32 value, unsigned n)
1464{
1465	while (n--) {
1466		if (*array++ == value)
1467			return 0;
1468	}
1469	return -1;
1470}
1471
1472/**
1473 * hid_match_report - check if driver's raw_event should be called
1474 *
1475 * @hid: hid device
1476 * @report: hid report to match against
1477 *
1478 * compare hid->driver->report_table->report_type to report->type
1479 */
1480static int hid_match_report(struct hid_device *hid, struct hid_report *report)
1481{
1482	const struct hid_report_id *id = hid->driver->report_table;
1483
1484	if (!id) /* NULL means all */
1485		return 1;
1486
1487	for (; id->report_type != HID_TERMINATOR; id++)
1488		if (id->report_type == HID_ANY_ID ||
1489				id->report_type == report->type)
1490			return 1;
1491	return 0;
1492}
1493
1494/**
1495 * hid_match_usage - check if driver's event should be called
1496 *
1497 * @hid: hid device
1498 * @usage: usage to match against
1499 *
1500 * compare hid->driver->usage_table->usage_{type,code} to
1501 * usage->usage_{type,code}
1502 */
1503static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage)
1504{
1505	const struct hid_usage_id *id = hid->driver->usage_table;
1506
1507	if (!id) /* NULL means all */
1508		return 1;
1509
1510	for (; id->usage_type != HID_ANY_ID - 1; id++)
1511		if ((id->usage_hid == HID_ANY_ID ||
1512				id->usage_hid == usage->hid) &&
1513				(id->usage_type == HID_ANY_ID ||
1514				id->usage_type == usage->type) &&
1515				(id->usage_code == HID_ANY_ID ||
1516				 id->usage_code == usage->code))
1517			return 1;
1518	return 0;
1519}
1520
1521static void hid_process_event(struct hid_device *hid, struct hid_field *field,
1522		struct hid_usage *usage, __s32 value, int interrupt)
1523{
1524	struct hid_driver *hdrv = hid->driver;
1525	int ret;
1526
1527	if (!list_empty(&hid->debug_list))
1528		hid_dump_input(hid, usage, value);
1529
1530	if (hdrv && hdrv->event && hid_match_usage(hid, usage)) {
1531		ret = hdrv->event(hid, field, usage, value);
1532		if (ret != 0) {
1533			if (ret < 0)
1534				hid_err(hid, "%s's event failed with %d\n",
1535						hdrv->name, ret);
1536			return;
1537		}
1538	}
1539
1540	if (hid->claimed & HID_CLAIMED_INPUT)
1541		hidinput_hid_event(hid, field, usage, value);
1542	if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event)
1543		hid->hiddev_hid_event(hid, field, usage, value);
1544}
1545
1546/*
1547 * Checks if the given value is valid within this field
1548 */
1549static inline int hid_array_value_is_valid(struct hid_field *field,
1550					   __s32 value)
1551{
1552	__s32 min = field->logical_minimum;
1553
1554	/*
1555	 * Value needs to be between logical min and max, and
1556	 * (value - min) is used as an index in the usage array.
1557	 * This array is of size field->maxusage
1558	 */
1559	return value >= min &&
1560	       value <= field->logical_maximum &&
1561	       value - min < field->maxusage;
1562}
1563
1564/*
1565 * Fetch the field from the data. The field content is stored for next
1566 * report processing (we do differential reporting to the layer).
1567 */
1568static void hid_input_fetch_field(struct hid_device *hid,
1569				  struct hid_field *field,
1570				  __u8 *data)
1571{
1572	unsigned n;
1573	unsigned count = field->report_count;
1574	unsigned offset = field->report_offset;
1575	unsigned size = field->report_size;
1576	__s32 min = field->logical_minimum;
1577	__s32 *value;
1578
1579	value = field->new_value;
1580	memset(value, 0, count * sizeof(__s32));
1581	field->ignored = false;
1582
1583	for (n = 0; n < count; n++) {
1584
1585		value[n] = min < 0 ?
1586			snto32(hid_field_extract(hid, data, offset + n * size,
1587			       size), size) :
1588			hid_field_extract(hid, data, offset + n * size, size);
1589
1590		/* Ignore report if ErrorRollOver */
1591		if (!(field->flags & HID_MAIN_ITEM_VARIABLE) &&
1592		    hid_array_value_is_valid(field, value[n]) &&
1593		    field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) {
1594			field->ignored = true;
1595			return;
1596		}
1597	}
1598}
1599
1600/*
1601 * Process a received variable field.
1602 */
1603
1604static void hid_input_var_field(struct hid_device *hid,
1605				struct hid_field *field,
1606				int interrupt)
1607{
1608	unsigned int count = field->report_count;
1609	__s32 *value = field->new_value;
1610	unsigned int n;
1611
1612	for (n = 0; n < count; n++)
1613		hid_process_event(hid,
1614				  field,
1615				  &field->usage[n],
1616				  value[n],
1617				  interrupt);
1618
1619	memcpy(field->value, value, count * sizeof(__s32));
1620}
1621
1622/*
1623 * Process a received array field. The field content is stored for
1624 * next report processing (we do differential reporting to the layer).
1625 */
1626
1627static void hid_input_array_field(struct hid_device *hid,
1628				  struct hid_field *field,
1629				  int interrupt)
1630{
1631	unsigned int n;
1632	unsigned int count = field->report_count;
1633	__s32 min = field->logical_minimum;
1634	__s32 *value;
1635
1636	value = field->new_value;
1637
1638	/* ErrorRollOver */
1639	if (field->ignored)
1640		return;
1641
1642	for (n = 0; n < count; n++) {
1643		if (hid_array_value_is_valid(field, field->value[n]) &&
1644		    search(value, field->value[n], count))
1645			hid_process_event(hid,
1646					  field,
1647					  &field->usage[field->value[n] - min],
1648					  0,
1649					  interrupt);
1650
1651		if (hid_array_value_is_valid(field, value[n]) &&
1652		    search(field->value, value[n], count))
1653			hid_process_event(hid,
1654					  field,
1655					  &field->usage[value[n] - min],
1656					  1,
1657					  interrupt);
1658	}
1659
1660	memcpy(field->value, value, count * sizeof(__s32));
1661}
1662
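/*
 * Example of the differential reporting above (illustrative): if the
 * previous report of a keyboard array field contained usage A and the
 * new report contains usage B instead, A is reported released (value 0)
 * and B pressed (value 1); usages present in both reports generate no
 * event.
 */
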
1663/*
1664 * Analyse a received report, and fetch the data from it. The field
1665 * content is stored for next report processing (we do differential
1666 * reporting to the layer).
1667 */
1668static void hid_process_report(struct hid_device *hid,
1669			       struct hid_report *report,
1670			       __u8 *data,
1671			       int interrupt)
1672{
1673	unsigned int a;
1674	struct hid_field_entry *entry;
1675	struct hid_field *field;
1676
1677	/* first retrieve all incoming values in data */
1678	for (a = 0; a < report->maxfield; a++)
1679		hid_input_fetch_field(hid, report->field[a], data);
1680
1681	if (!list_empty(&report->field_entry_list)) {
1682		/* INPUT_REPORT, we have a priority list of fields */
1683		list_for_each_entry(entry,
1684				    &report->field_entry_list,
1685				    list) {
1686			field = entry->field;
1687
1688			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1689				hid_process_event(hid,
1690						  field,
1691						  &field->usage[entry->index],
1692						  field->new_value[entry->index],
1693						  interrupt);
1694			else
1695				hid_input_array_field(hid, field, interrupt);
1696		}
1697
1698		/* we need to do the memcpy at the end for var items */
1699		for (a = 0; a < report->maxfield; a++) {
1700			field = report->field[a];
1701
1702			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1703				memcpy(field->value, field->new_value,
1704				       field->report_count * sizeof(__s32));
1705		}
1706	} else {
1707		/* FEATURE_REPORT, regular processing */
1708		for (a = 0; a < report->maxfield; a++) {
1709			field = report->field[a];
1710
1711			if (field->flags & HID_MAIN_ITEM_VARIABLE)
1712				hid_input_var_field(hid, field, interrupt);
1713			else
1714				hid_input_array_field(hid, field, interrupt);
1715		}
1716	}
1717}
1718
1719/*
1720 * Insert a given usage_index in a field in the list
1721 * of processed usages in the report.
1722 *
1723 * The elements of lower priority score are processed
1724 * first.
1725 */
1726static void __hid_insert_field_entry(struct hid_device *hid,
1727				     struct hid_report *report,
1728				     struct hid_field_entry *entry,
1729				     struct hid_field *field,
1730				     unsigned int usage_index)
1731{
1732	struct hid_field_entry *next;
1733
1734	entry->field = field;
1735	entry->index = usage_index;
1736	entry->priority = field->usages_priorities[usage_index];
1737
1738	/* insert the element at the correct position */
1739	list_for_each_entry(next,
1740			    &report->field_entry_list,
1741			    list) {
1742		/*
1743		 * the priority of our element is strictly higher
1744		 * than the next one, insert it before
1745		 */
1746		if (entry->priority > next->priority) {
1747			list_add_tail(&entry->list, &next->list);
1748			return;
1749		}
1750	}
1751
1752	/* lowest priority score: insert at the end */
1753	list_add_tail(&entry->list, &report->field_entry_list);
1754}
1755
1756static void hid_report_process_ordering(struct hid_device *hid,
1757					struct hid_report *report)
1758{
1759	struct hid_field *field;
1760	struct hid_field_entry *entries;
1761	unsigned int a, u, usages;
1762	unsigned int count = 0;
1763
1764	/* count the number of individual fields in the report */
1765	for (a = 0; a < report->maxfield; a++) {
1766		field = report->field[a];
1767
1768		if (field->flags & HID_MAIN_ITEM_VARIABLE)
1769			count += field->report_count;
1770		else
1771			count++;
1772	}
1773
1774	/* allocate the memory to process the fields */
1775	entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
1776	if (!entries)
1777		return;
1778
1779	report->field_entries = entries;
1780
1781	/*
1782	 * walk through all fields in the report and
1783	 * store them by priority order in report->field_entry_list
1784	 *
1785	 * - Var elements are individualized (field + usage_index)
1786	 * - Arrays are taken as one, we cannot choose an order for them
1787	 */
1788	usages = 0;
1789	for (a = 0; a < report->maxfield; a++) {
1790		field = report->field[a];
1791
1792		if (field->flags & HID_MAIN_ITEM_VARIABLE) {
1793			for (u = 0; u < field->report_count; u++) {
1794				__hid_insert_field_entry(hid, report,
1795							 &entries[usages],
1796							 field, u);
1797				usages++;
1798			}
1799		} else {
1800			__hid_insert_field_entry(hid, report, &entries[usages],
1801						 field, 0);
1802			usages++;
1803		}
1804	}
1805}
1806
1807static void hid_process_ordering(struct hid_device *hid)
1808{
1809	struct hid_report *report;
1810	struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT];
1811
1812	list_for_each_entry(report, &report_enum->report_list, list)
1813		hid_report_process_ordering(hid, report);
1814}
1815
1816/*
1817 * Output the field into the report.
1818 */
1819
1820static void hid_output_field(const struct hid_device *hid,
1821			     struct hid_field *field, __u8 *data)
1822{
1823	unsigned count = field->report_count;
1824	unsigned offset = field->report_offset;
1825	unsigned size = field->report_size;
1826	unsigned n;
1827
1828	for (n = 0; n < count; n++) {
1829		if (field->logical_minimum < 0)	/* signed values */
1830			implement(hid, data, offset + n * size, size,
1831				  s32ton(field->value[n], size));
1832		else				/* unsigned values */
1833			implement(hid, data, offset + n * size, size,
1834				  field->value[n]);
1835	}
1836}
1837
1838/*
1839 * Compute the size of a report.
1840 */
1841static size_t hid_compute_report_size(struct hid_report *report)
1842{
1843	if (report->size)
1844		return ((report->size - 1) >> 3) + 1;
1845
1846	return 0;
1847}
1848
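/*
 * Example (illustrative): report->size counts bits, so a report carrying
 * a single 12-bit field occupies ((12 - 1) >> 3) + 1 == 2 bytes on the
 * wire; the report ID byte, when present, is accounted for separately by
 * hid_report_len().
 */
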
1849/*
1850 * Create a report. 'data' has to be allocated using
1851 * hid_alloc_report_buf() so that it has proper size.
1852 */
1853
1854void hid_output_report(struct hid_report *report, __u8 *data)
1855{
1856	unsigned n;
1857
1858	if (report->id > 0)
1859		*data++ = report->id;
1860
1861	memset(data, 0, hid_compute_report_size(report));
1862	for (n = 0; n < report->maxfield; n++)
1863		hid_output_field(report->device, report->field[n], data);
1864}
1865EXPORT_SYMBOL_GPL(hid_output_report);
1866
1867/*
1868 * Allocator for buffer that is going to be passed to hid_output_report()
1869 */
1870u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1871{
1872	/*
1873	 * 7 extra bytes are necessary to achieve proper functionality
1874	 * of implement() working on 8 byte chunks
1875	 */
1876
1877	u32 len = hid_report_len(report) + 7;
1878
1879	return kmalloc(len, flags);
1880}
1881EXPORT_SYMBOL_GPL(hid_alloc_report_buf);
1882
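/*
 * Usage sketch (not from the original source), mirroring what
 * __hid_request() does below for HID_REQ_SET_REPORT:
 *
 *	buf = hid_alloc_report_buf(report, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	hid_output_report(report, buf);
 *	ret = hid->ll_driver->raw_request(hid, report->id, buf,
 *					  hid_report_len(report),
 *					  report->type, HID_REQ_SET_REPORT);
 *	kfree(buf);
 */
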
1883/*
1884 * Set a field value. The report this field belongs to has to be
1885 * created and transferred to the device, to set this value in the
1886 * device.
1887 */
1888
1889int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
1890{
1891	unsigned size;
1892
1893	if (!field)
1894		return -1;
1895
1896	size = field->report_size;
1897
1898	hid_dump_input(field->report->device, field->usage + offset, value);
1899
1900	if (offset >= field->report_count) {
1901		hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n",
1902				offset, field->report_count);
1903		return -1;
1904	}
1905	if (field->logical_minimum < 0) {
1906		if (value != snto32(s32ton(value, size), size)) {
1907			hid_err(field->report->device, "value %d is out of range\n", value);
1908			return -1;
1909		}
1910	}
1911	field->value[offset] = value;
1912	return 0;
1913}
1914EXPORT_SYMBOL_GPL(hid_set_field);
1915
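/*
 * Usage sketch (hypothetical report and field indexes): a driver pushing
 * a new value to the device would typically do
 *
 *	hid_set_field(report->field[0], 0, value);
 *	hid_hw_request(hdev, report, HID_REQ_SET_REPORT);
 *
 * so that the freshly set value is serialized by hid_output_report() on
 * its way out.
 */
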
1916static struct hid_report *hid_get_report(struct hid_report_enum *report_enum,
1917		const u8 *data)
1918{
1919	struct hid_report *report;
1920	unsigned int n = 0;	/* Normally report number is 0 */
1921
1922	/* Device uses numbered reports, data[0] is report number */
1923	if (report_enum->numbered)
1924		n = *data;
1925
1926	report = report_enum->report_id_hash[n];
1927	if (report == NULL)
1928		dbg_hid("undefined report_id %u received\n", n);
1929
1930	return report;
1931}
1932
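/*
 * Example (illustrative bytes): on a device with numbered reports an
 * incoming buffer {0x02, 0x10, 0x80} selects report_id_hash[2] and the
 * remaining two bytes are the payload; on an unnumbered device the whole
 * buffer is payload for report 0.
 */
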
1933/*
1934 * Implement a generic .request() callback, using .raw_request()
1935 * DO NOT USE in hid drivers directly, but through hid_hw_request instead.
1936 */
1937int __hid_request(struct hid_device *hid, struct hid_report *report,
1938		enum hid_class_request reqtype)
1939{
1940	char *buf;
1941	int ret;
1942	u32 len;
1943
1944	buf = hid_alloc_report_buf(report, GFP_KERNEL);
1945	if (!buf)
1946		return -ENOMEM;
1947
1948	len = hid_report_len(report);
1949
1950	if (reqtype == HID_REQ_SET_REPORT)
1951		hid_output_report(report, buf);
1952
1953	ret = hid->ll_driver->raw_request(hid, report->id, buf, len,
1954					  report->type, reqtype);
1955	if (ret < 0) {
1956		dbg_hid("unable to complete request: %d\n", ret);
1957		goto out;
1958	}
1959
1960	if (reqtype == HID_REQ_GET_REPORT)
1961		hid_input_report(hid, report->type, buf, ret, 0);
1962
1963	ret = 0;
1964
1965out:
1966	kfree(buf);
1967	return ret;
1968}
1969EXPORT_SYMBOL_GPL(__hid_request);
1970
1971int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
1972			 int interrupt)
1973{
1974	struct hid_report_enum *report_enum = hid->report_enum + type;
1975	struct hid_report *report;
1976	struct hid_driver *hdrv;
1977	int max_buffer_size = HID_MAX_BUFFER_SIZE;
1978	u32 rsize, csize = size;
1979	u8 *cdata = data;
1980	int ret = 0;
1981
1982	report = hid_get_report(report_enum, data);
1983	if (!report)
1984		goto out;
1985
1986	if (report_enum->numbered) {
1987		cdata++;
1988		csize--;
1989	}
1990
1991	rsize = hid_compute_report_size(report);
1992
1993	if (hid->ll_driver->max_buffer_size)
1994		max_buffer_size = hid->ll_driver->max_buffer_size;
1995
1996	if (report_enum->numbered && rsize >= max_buffer_size)
1997		rsize = max_buffer_size - 1;
1998	else if (rsize > max_buffer_size)
1999		rsize = max_buffer_size;
2000
2001	if (csize < rsize) {
2002		dbg_hid("report %d is too short, (%d < %d)\n", report->id,
2003				csize, rsize);
2004		memset(cdata + csize, 0, rsize - csize);
2005	}
2006
2007	if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
2008		hid->hiddev_report_event(hid, report);
2009	if (hid->claimed & HID_CLAIMED_HIDRAW) {
2010		ret = hidraw_report_event(hid, data, size);
2011		if (ret)
2012			goto out;
2013	}
2014
2015	if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) {
2016		hid_process_report(hid, report, cdata, interrupt);
2017		hdrv = hid->driver;
2018		if (hdrv && hdrv->report)
2019			hdrv->report(hid, report);
2020	}
2021
2022	if (hid->claimed & HID_CLAIMED_INPUT)
2023		hidinput_report_event(hid, report);
2024out:
2025	return ret;
2026}
2027EXPORT_SYMBOL_GPL(hid_report_raw_event);
2028
2029/**
2030 * hid_input_report - report data from lower layer (usb, bt...)
2031 *
2032 * @hid: hid device
2033 * @type: HID report type (HID_*_REPORT)
2034 * @data: report contents
2035 * @size: size of data parameter
2036 * @interrupt: distinguish between interrupt and control transfers
2037 *
2038 * This is the data entry point for lower layers.
2039 */
2040int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size,
2041		     int interrupt)
2042{
2043	struct hid_report_enum *report_enum;
2044	struct hid_driver *hdrv;
2045	struct hid_report *report;
2046	int ret = 0;
2047
2048	if (!hid)
2049		return -ENODEV;
2050
2051	if (down_trylock(&hid->driver_input_lock))
2052		return -EBUSY;
2053
2054	if (!hid->driver) {
2055		ret = -ENODEV;
2056		goto unlock;
2057	}
2058	report_enum = hid->report_enum + type;
2059	hdrv = hid->driver;
2060
2061	data = dispatch_hid_bpf_device_event(hid, type, data, &size, interrupt);
2062	if (IS_ERR(data)) {
2063		ret = PTR_ERR(data);
2064		goto unlock;
2065	}
2066
2067	if (!size) {
2068		dbg_hid("empty report\n");
2069		ret = -1;
2070		goto unlock;
2071	}
2072
2073	/* Avoid unnecessary overhead if debugfs is disabled */
2074	if (!list_empty(&hid->debug_list))
2075		hid_dump_report(hid, type, data, size);
2076
2077	report = hid_get_report(report_enum, data);
2078
2079	if (!report) {
2080		ret = -1;
2081		goto unlock;
2082	}
2083
2084	if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) {
2085		ret = hdrv->raw_event(hid, report, data, size);
2086		if (ret < 0)
2087			goto unlock;
2088	}
2089
2090	ret = hid_report_raw_event(hid, type, data, size, interrupt);
2091
2092unlock:
2093	up(&hid->driver_input_lock);
2094	return ret;
2095}
2096EXPORT_SYMBOL_GPL(hid_input_report);
2097
2098bool hid_match_one_id(const struct hid_device *hdev,
2099		      const struct hid_device_id *id)
2100{
2101	return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) &&
2102		(id->group == HID_GROUP_ANY || id->group == hdev->group) &&
2103		(id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) &&
2104		(id->product == HID_ANY_ID || id->product == hdev->product);
2105}
2106
2107const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
2108		const struct hid_device_id *id)
2109{
2110	for (; id->bus; id++)
2111		if (hid_match_one_id(hdev, id))
2112			return id;
2113
2114	return NULL;
2115}
2116EXPORT_SYMBOL_GPL(hid_match_id);
2117
2118static const struct hid_device_id hid_hiddev_list[] = {
2119	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) },
2120	{ HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) },
2121	{ }
2122};
2123
2124static bool hid_hiddev(struct hid_device *hdev)
2125{
2126	return !!hid_match_id(hdev, hid_hiddev_list);
2127}
2128
2129
2130static ssize_t
2131read_report_descriptor(struct file *filp, struct kobject *kobj,
2132		struct bin_attribute *attr,
2133		char *buf, loff_t off, size_t count)
2134{
2135	struct device *dev = kobj_to_dev(kobj);
2136	struct hid_device *hdev = to_hid_device(dev);
2137
2138	if (off >= hdev->rsize)
2139		return 0;
2140
2141	if (off + count > hdev->rsize)
2142		count = hdev->rsize - off;
2143
2144	memcpy(buf, hdev->rdesc + off, count);
2145
2146	return count;
2147}
2148
2149static ssize_t
2150show_country(struct device *dev, struct device_attribute *attr,
2151		char *buf)
2152{
2153	struct hid_device *hdev = to_hid_device(dev);
2154
2155	return sprintf(buf, "%02x\n", hdev->country & 0xff);
2156}
2157
2158static struct bin_attribute dev_bin_attr_report_desc = {
2159	.attr = { .name = "report_descriptor", .mode = 0444 },
2160	.read = read_report_descriptor,
2161	.size = HID_MAX_DESCRIPTOR_SIZE,
2162};
2163
2164static const struct device_attribute dev_attr_country = {
2165	.attr = { .name = "country", .mode = 0444 },
2166	.show = show_country,
2167};
2168
2169int hid_connect(struct hid_device *hdev, unsigned int connect_mask)
2170{
2171	static const char *types[] = { "Device", "Pointer", "Mouse", "Device",
2172		"Joystick", "Gamepad", "Keyboard", "Keypad",
2173		"Multi-Axis Controller"
2174	};
2175	const char *type, *bus;
2176	char buf[64] = "";
2177	unsigned int i;
2178	int len;
2179	int ret;
2180
2181	ret = hid_bpf_connect_device(hdev);
2182	if (ret)
2183		return ret;
2184
2185	if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE)
2186		connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV);
2187	if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE)
2188		connect_mask |= HID_CONNECT_HIDINPUT_FORCE;
2189	if (hdev->bus != BUS_USB)
2190		connect_mask &= ~HID_CONNECT_HIDDEV;
2191	if (hid_hiddev(hdev))
2192		connect_mask |= HID_CONNECT_HIDDEV_FORCE;
2193
2194	if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev,
2195				connect_mask & HID_CONNECT_HIDINPUT_FORCE))
2196		hdev->claimed |= HID_CLAIMED_INPUT;
2197
2198	if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect &&
2199			!hdev->hiddev_connect(hdev,
2200				connect_mask & HID_CONNECT_HIDDEV_FORCE))
2201		hdev->claimed |= HID_CLAIMED_HIDDEV;
2202	if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev))
2203		hdev->claimed |= HID_CLAIMED_HIDRAW;
2204
2205	if (connect_mask & HID_CONNECT_DRIVER)
2206		hdev->claimed |= HID_CLAIMED_DRIVER;
2207
2208	/* Drivers with the ->raw_event callback set are not required to connect
2209	 * to any other listener. */
2210	if (!hdev->claimed && !hdev->driver->raw_event) {
2211		hid_err(hdev, "device has no listeners, quitting\n");
2212		return -ENODEV;
2213	}
2214
2215	hid_process_ordering(hdev);
2216
2217	if ((hdev->claimed & HID_CLAIMED_INPUT) &&
2218			(connect_mask & HID_CONNECT_FF) && hdev->ff_init)
2219		hdev->ff_init(hdev);
2220
2221	len = 0;
2222	if (hdev->claimed & HID_CLAIMED_INPUT)
2223		len += sprintf(buf + len, "input");
2224	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2225		len += sprintf(buf + len, "%shiddev%d", len ? "," : "",
2226				((struct hiddev *)hdev->hiddev)->minor);
2227	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2228		len += sprintf(buf + len, "%shidraw%d", len ? "," : "",
2229				((struct hidraw *)hdev->hidraw)->minor);
2230
2231	type = "Device";
2232	for (i = 0; i < hdev->maxcollection; i++) {
2233		struct hid_collection *col = &hdev->collection[i];
2234		if (col->type == HID_COLLECTION_APPLICATION &&
2235		   (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK &&
2236		   (col->usage & 0xffff) < ARRAY_SIZE(types)) {
2237			type = types[col->usage & 0xffff];
2238			break;
2239		}
2240	}
2241
2242	switch (hdev->bus) {
2243	case BUS_USB:
2244		bus = "USB";
2245		break;
2246	case BUS_BLUETOOTH:
2247		bus = "BLUETOOTH";
2248		break;
2249	case BUS_I2C:
2250		bus = "I2C";
2251		break;
2252	case BUS_VIRTUAL:
2253		bus = "VIRTUAL";
2254		break;
2255	case BUS_INTEL_ISHTP:
2256	case BUS_AMD_SFH:
2257		bus = "SENSOR HUB";
2258		break;
2259	default:
2260		bus = "<UNKNOWN>";
2261	}
2262
2263	ret = device_create_file(&hdev->dev, &dev_attr_country);
2264	if (ret)
2265		hid_warn(hdev,
2266			 "can't create sysfs country code attribute err: %d\n", ret);
2267
2268	hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n",
2269		 buf, bus, hdev->version >> 8, hdev->version & 0xff,
2270		 type, hdev->name, hdev->phys);
2271
2272	return 0;
2273}
2274EXPORT_SYMBOL_GPL(hid_connect);
2275
2276void hid_disconnect(struct hid_device *hdev)
2277{
2278	device_remove_file(&hdev->dev, &dev_attr_country);
2279	if (hdev->claimed & HID_CLAIMED_INPUT)
2280		hidinput_disconnect(hdev);
2281	if (hdev->claimed & HID_CLAIMED_HIDDEV)
2282		hdev->hiddev_disconnect(hdev);
2283	if (hdev->claimed & HID_CLAIMED_HIDRAW)
2284		hidraw_disconnect(hdev);
2285	hdev->claimed = 0;
2286
2287	hid_bpf_disconnect_device(hdev);
2288}
2289EXPORT_SYMBOL_GPL(hid_disconnect);
2290
2291/**
2292 * hid_hw_start - start underlying HW
2293 * @hdev: hid device
2294 * @connect_mask: which outputs to connect, see HID_CONNECT_*
2295 *
2296 * Call this in probe function *after* hid_parse. This will set up HW
2297 * buffers and start the device (if not deferred to device open).
2298 * hid_hw_stop must be called if this was successful.
2299 */
2300int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask)
2301{
2302	int error;
2303
2304	error = hdev->ll_driver->start(hdev);
2305	if (error)
2306		return error;
2307
2308	if (connect_mask) {
2309		error = hid_connect(hdev, connect_mask);
2310		if (error) {
2311			hdev->ll_driver->stop(hdev);
2312			return error;
2313		}
2314	}
2315
2316	return 0;
2317}
2318EXPORT_SYMBOL_GPL(hid_hw_start);
2319
2320/**
2321 * hid_hw_stop - stop underlying HW
2322 * @hdev: hid device
2323 *
2324 * This is usually called from the remove function, or from probe when
2325 * something failed and hid_hw_start() was called already.
2326 */
2327void hid_hw_stop(struct hid_device *hdev)
2328{
2329	hid_disconnect(hdev);
2330	hdev->ll_driver->stop(hdev);
2331}
2332EXPORT_SYMBOL_GPL(hid_hw_stop);
2333
2334/**
2335 * hid_hw_open - signal underlying HW to start delivering events
2336 * @hdev: hid device
2337 *
2338 * Tell underlying HW to start delivering events from the device.
2339 * This function should be called sometime after a successful call
2340 * to hid_hw_start().
2341 */
2342int hid_hw_open(struct hid_device *hdev)
2343{
2344	int ret;
2345
2346	ret = mutex_lock_killable(&hdev->ll_open_lock);
2347	if (ret)
2348		return ret;
2349
2350	if (!hdev->ll_open_count++) {
2351		ret = hdev->ll_driver->open(hdev);
2352		if (ret)
2353			hdev->ll_open_count--;
2354	}
2355
2356	mutex_unlock(&hdev->ll_open_lock);
2357	return ret;
2358}
2359EXPORT_SYMBOL_GPL(hid_hw_open);
2360
2361/**
2362 * hid_hw_close - signal underlying HW to stop delivering events
2363 *
2364 * @hdev: hid device
2365 *
2366 * This function indicates that we are not interested in the events
2367 * from this device anymore. Delivery of events may or may not stop,
2368 * depending on the number of users still outstanding.
2369 */
2370void hid_hw_close(struct hid_device *hdev)
2371{
2372	mutex_lock(&hdev->ll_open_lock);
2373	if (!--hdev->ll_open_count)
2374		hdev->ll_driver->close(hdev);
2375	mutex_unlock(&hdev->ll_open_lock);
2376}
2377EXPORT_SYMBOL_GPL(hid_hw_close);
2378
2379/**
2380 * hid_hw_request - send report request to device
2381 *
2382 * @hdev: hid device
2383 * @report: report to send
2384 * @reqtype: hid request type
2385 */
2386void hid_hw_request(struct hid_device *hdev,
2387		    struct hid_report *report, enum hid_class_request reqtype)
2388{
2389	if (hdev->ll_driver->request)
2390		return hdev->ll_driver->request(hdev, report, reqtype);
2391
2392	__hid_request(hdev, report, reqtype);
2393}
2394EXPORT_SYMBOL_GPL(hid_hw_request);
2395
2396/**
2397 * hid_hw_raw_request - send report request to device
2398 *
2399 * @hdev: hid device
2400 * @reportnum: report ID
2401 * @buf: in/out data to transfer
2402 * @len: length of buf
2403 * @rtype: HID report type
2404 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT
2405 *
2406 * Return: count of data transferred, negative if error
2407 *
2408 * Same behavior as hid_hw_request, but with raw buffers instead.
2409 */
2410int hid_hw_raw_request(struct hid_device *hdev,
2411		       unsigned char reportnum, __u8 *buf,
2412		       size_t len, enum hid_report_type rtype, enum hid_class_request reqtype)
2413{
2414	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2415
2416	if (hdev->ll_driver->max_buffer_size)
2417		max_buffer_size = hdev->ll_driver->max_buffer_size;
2418
2419	if (len < 1 || len > max_buffer_size || !buf)
2420		return -EINVAL;
2421
2422	return hdev->ll_driver->raw_request(hdev, reportnum, buf, len,
2423					    rtype, reqtype);
2424}
2425EXPORT_SYMBOL_GPL(hid_hw_raw_request);
2426
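/*
 * Usage sketch (hypothetical report ID and length): synchronously
 * fetching a feature report from a driver could look like
 *
 *	buf = kzalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	ret = hid_hw_raw_request(hdev, 0x0b, buf, len,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *
 * where a non-negative ret is the number of bytes actually transferred.
 */
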
2427/**
2428 * hid_hw_output_report - send output report to device
2429 *
2430 * @hdev: hid device
2431 * @buf: raw data to transfer
2432 * @len: length of buf
2433 *
2434 * Return: count of data transferred, negative if error
2435 */
2436int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len)
2437{
2438	unsigned int max_buffer_size = HID_MAX_BUFFER_SIZE;
2439
2440	if (hdev->ll_driver->max_buffer_size)
2441		max_buffer_size = hdev->ll_driver->max_buffer_size;
2442
2443	if (len < 1 || len > max_buffer_size || !buf)
2444		return -EINVAL;
2445
2446	if (hdev->ll_driver->output_report)
2447		return hdev->ll_driver->output_report(hdev, buf, len);
2448
2449	return -ENOSYS;
2450}
2451EXPORT_SYMBOL_GPL(hid_hw_output_report);
2452
2453#ifdef CONFIG_PM
2454int hid_driver_suspend(struct hid_device *hdev, pm_message_t state)
2455{
2456	if (hdev->driver && hdev->driver->suspend)
2457		return hdev->driver->suspend(hdev, state);
2458
2459	return 0;
2460}
2461EXPORT_SYMBOL_GPL(hid_driver_suspend);
2462
2463int hid_driver_reset_resume(struct hid_device *hdev)
2464{
2465	if (hdev->driver && hdev->driver->reset_resume)
2466		return hdev->driver->reset_resume(hdev);
2467
2468	return 0;
2469}
2470EXPORT_SYMBOL_GPL(hid_driver_reset_resume);
2471
2472int hid_driver_resume(struct hid_device *hdev)
2473{
2474	if (hdev->driver && hdev->driver->resume)
2475		return hdev->driver->resume(hdev);
2476
2477	return 0;
2478}
2479EXPORT_SYMBOL_GPL(hid_driver_resume);
2480#endif /* CONFIG_PM */
2481
2482struct hid_dynid {
2483	struct list_head list;
2484	struct hid_device_id id;
2485};
2486
2487/**
2488 * new_id_store - add a new HID device ID to this driver and re-probe devices
2489 * @drv: target device driver
2490 * @buf: buffer for scanning device ID data
2491 * @count: input size
2492 *
2493 * Adds a new dynamic hid device ID to this driver,
2494 * and causes the driver to probe for all devices again.
2495 */
2496static ssize_t new_id_store(struct device_driver *drv, const char *buf,
2497		size_t count)
2498{
2499	struct hid_driver *hdrv = to_hid_driver(drv);
2500	struct hid_dynid *dynid;
2501	__u32 bus, vendor, product;
2502	unsigned long driver_data = 0;
2503	int ret;
2504
2505	ret = sscanf(buf, "%x %x %x %lx",
2506			&bus, &vendor, &product, &driver_data);
2507	if (ret < 3)
2508		return -EINVAL;
2509
2510	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
2511	if (!dynid)
2512		return -ENOMEM;
2513
2514	dynid->id.bus = bus;
2515	dynid->id.group = HID_GROUP_ANY;
2516	dynid->id.vendor = vendor;
2517	dynid->id.product = product;
2518	dynid->id.driver_data = driver_data;
2519
2520	spin_lock(&hdrv->dyn_lock);
2521	list_add_tail(&dynid->list, &hdrv->dyn_list);
2522	spin_unlock(&hdrv->dyn_lock);
2523
2524	ret = driver_attach(&hdrv->driver);
2525
2526	return ret ? : count;
2527}
2528static DRIVER_ATTR_WO(new_id);
2529
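/*
 * Example (illustrative IDs): binding a driver to an unlisted USB device
 * with vendor 0x046d and product 0x0001 from userspace:
 *
 *	echo "0003 046D 0001" > /sys/bus/hid/drivers/<driver>/new_id
 *
 * The first hex field is the bus (0003 == BUS_USB); an optional fourth
 * field is parsed as driver_data.
 */
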
2530static struct attribute *hid_drv_attrs[] = {
2531	&driver_attr_new_id.attr,
2532	NULL,
2533};
2534ATTRIBUTE_GROUPS(hid_drv);
2535
2536static void hid_free_dynids(struct hid_driver *hdrv)
2537{
2538	struct hid_dynid *dynid, *n;
2539
2540	spin_lock(&hdrv->dyn_lock);
2541	list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) {
2542		list_del(&dynid->list);
2543		kfree(dynid);
2544	}
2545	spin_unlock(&hdrv->dyn_lock);
2546}
2547
2548const struct hid_device_id *hid_match_device(struct hid_device *hdev,
2549					     struct hid_driver *hdrv)
2550{
2551	struct hid_dynid *dynid;
2552
2553	spin_lock(&hdrv->dyn_lock);
2554	list_for_each_entry(dynid, &hdrv->dyn_list, list) {
2555		if (hid_match_one_id(hdev, &dynid->id)) {
2556			spin_unlock(&hdrv->dyn_lock);
2557			return &dynid->id;
2558		}
2559	}
2560	spin_unlock(&hdrv->dyn_lock);
2561
2562	return hid_match_id(hdev, hdrv->id_table);
2563}
2564EXPORT_SYMBOL_GPL(hid_match_device);
2565
2566static int hid_bus_match(struct device *dev, struct device_driver *drv)
2567{
2568	struct hid_driver *hdrv = to_hid_driver(drv);
2569	struct hid_device *hdev = to_hid_device(dev);
2570
2571	return hid_match_device(hdev, hdrv) != NULL;
2572}
2573
2574/**
2575 * hid_compare_device_paths - check if both devices share the same path
2576 * @hdev_a: hid device
2577 * @hdev_b: hid device
2578 * @separator: char to use as separator
2579 *
2580 * Check if two devices share the same path up to the last occurrence of
2581 * the separator char. Both paths must exist (i.e., zero-length paths
2582 * don't match).
2583 */
2584bool hid_compare_device_paths(struct hid_device *hdev_a,
2585			      struct hid_device *hdev_b, char separator)
2586{
2587	int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys;
2588	int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys;
2589
2590	if (n1 != n2 || n1 <= 0 || n2 <= 0)
2591		return false;
2592
2593	return !strncmp(hdev_a->phys, hdev_b->phys, n1);
2594}
2595EXPORT_SYMBOL_GPL(hid_compare_device_paths);
2596
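/*
 * Example (illustrative phys strings): two interfaces of the same USB
 * device, e.g. "usb-0000:00:14.0-3/input0" and "usb-0000:00:14.0-3/input1",
 * compare equal with separator '/', while otherwise identical paths on a
 * different port do not.
 */
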
2597static bool hid_check_device_match(struct hid_device *hdev,
2598				   struct hid_driver *hdrv,
2599				   const struct hid_device_id **id)
2600{
2601	*id = hid_match_device(hdev, hdrv);
2602	if (!*id)
2603		return false;
2604
2605	if (hdrv->match)
2606		return hdrv->match(hdev, hid_ignore_special_drivers);
2607
2608	/*
2609	 * hid-generic implements .match(), so we must be dealing with a
2610	 * different HID driver here, and can simply check if
2611	 * hid_ignore_special_drivers is set or not.
2612	 */
2613	return !hid_ignore_special_drivers;
2614}
2615
2616static int __hid_device_probe(struct hid_device *hdev, struct hid_driver *hdrv)
2617{
2618	const struct hid_device_id *id;
2619	int ret;
2620
2621	if (!hid_check_device_match(hdev, hdrv, &id))
2622		return -ENODEV;
2623
2624	hdev->devres_group_id = devres_open_group(&hdev->dev, NULL, GFP_KERNEL);
2625	if (!hdev->devres_group_id)
2626		return -ENOMEM;
2627
2628	/* reset the quirks that have been previously set */
2629	hdev->quirks = hid_lookup_quirk(hdev);
2630	hdev->driver = hdrv;
2631
2632	if (hdrv->probe) {
2633		ret = hdrv->probe(hdev, id);
2634	} else { /* default probe */
2635		ret = hid_open_report(hdev);
2636		if (!ret)
2637			ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
2638	}
2639
2640	/*
2641	 * Note that we are not closing the devres group opened above so
2642	 * even resources that were attached to the device after probe is
2643	 * run are released when hid_device_remove() is executed. This is
2644	 * needed as some drivers would allocate additional resources,
2645	 * for example when updating firmware.
2646	 */
2647
2648	if (ret) {
2649		devres_release_group(&hdev->dev, hdev->devres_group_id);
2650		hid_close_report(hdev);
2651		hdev->driver = NULL;
2652	}
2653
2654	return ret;
2655}
2656
2657static int hid_device_probe(struct device *dev)
2658{
2659	struct hid_device *hdev = to_hid_device(dev);
2660	struct hid_driver *hdrv = to_hid_driver(dev->driver);
2661	int ret = 0;
2662
2663	if (down_interruptible(&hdev->driver_input_lock))
2664		return -EINTR;
2665
2666	hdev->io_started = false;
2667	clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
2668
2669	if (!hdev->driver)
2670		ret = __hid_device_probe(hdev, hdrv);
2671
2672	if (!hdev->io_started)
2673		up(&hdev->driver_input_lock);
2674
2675	return ret;
2676}
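/*
 * Illustrative sketch (not part of hid-core.c): drivers that must receive
 * reports while still inside their probe() (for example to complete a
 * handshake with the device) call hid_device_io_start(), which sets
 * hdev->io_started and releases driver_input_lock early, so
 * hid_device_probe() above skips the final up().  example_probe_early_io()
 * is hypothetical.
 */
static int example_probe_early_io(struct hid_device *hdev,
				  const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret)
		return ret;

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
	if (ret)
		return ret;

	/* from here on, raw_event()/event() callbacks may run */
	hid_device_io_start(hdev);

	/* wait for the device handshake here before returning */
	return 0;
}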
2677
2678static void hid_device_remove(struct device *dev)
2679{
2680	struct hid_device *hdev = to_hid_device(dev);
2681	struct hid_driver *hdrv;
2682
2683	down(&hdev->driver_input_lock);
2684	hdev->io_started = false;
2685
2686	hdrv = hdev->driver;
2687	if (hdrv) {
2688		if (hdrv->remove)
2689			hdrv->remove(hdev);
2690		else /* default remove */
2691			hid_hw_stop(hdev);
2692
2693		/* Release all devres resources allocated by the driver */
2694		devres_release_group(&hdev->dev, hdev->devres_group_id);
2695
2696		hid_close_report(hdev);
2697		hdev->driver = NULL;
2698	}
2699
2700	if (!hdev->io_started)
2701		up(&hdev->driver_input_lock);
2702}
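/*
 * Illustrative sketch (not part of hid-core.c): a driver-specific remove()
 * normally just reverses its probe() and ends with hid_hw_stop(); anything
 * that was devm-allocated is then freed by the devres_release_group() call
 * above.  example_remove() is hypothetical.
 */
static void example_remove(struct hid_device *hdev)
{
	hid_hw_stop(hdev);
}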
2703
2704static ssize_t modalias_show(struct device *dev, struct device_attribute *a,
2705			     char *buf)
2706{
2707	struct hid_device *hdev = container_of(dev, struct hid_device, dev);
2708
2709	return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n",
2710			 hdev->bus, hdev->group, hdev->vendor, hdev->product);
2711}
2712static DEVICE_ATTR_RO(modalias);
2713
2714static struct attribute *hid_dev_attrs[] = {
2715	&dev_attr_modalias.attr,
2716	NULL,
2717};
2718static struct bin_attribute *hid_dev_bin_attrs[] = {
2719	&dev_bin_attr_report_desc,
2720	NULL
2721};
2722static const struct attribute_group hid_dev_group = {
2723	.attrs = hid_dev_attrs,
2724	.bin_attrs = hid_dev_bin_attrs,
2725};
2726__ATTRIBUTE_GROUPS(hid_dev);
2727
2728static int hid_uevent(const struct device *dev, struct kobj_uevent_env *env)
2729{
2730	const struct hid_device *hdev = to_hid_device(dev);
2731
2732	if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X",
2733			hdev->bus, hdev->vendor, hdev->product))
2734		return -ENOMEM;
2735
2736	if (add_uevent_var(env, "HID_NAME=%s", hdev->name))
2737		return -ENOMEM;
2738
2739	if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys))
2740		return -ENOMEM;
2741
2742	if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq))
2743		return -ENOMEM;
2744
2745	if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X",
2746			   hdev->bus, hdev->group, hdev->vendor, hdev->product))
2747		return -ENOMEM;
2748
2749	return 0;
2750}
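/*
 * Illustrative sketch (not part of hid-core.c): the MODALIAS emitted above
 * (hid:b%04Xg%04Xv%08Xp%08X) is matched by userspace against the aliases a
 * driver exports through MODULE_DEVICE_TABLE(hid, ...).  The table below is
 * hypothetical; HID_USB_DEVICE() fills in bus, group, vendor and product.
 */
static const struct hid_device_id example_ids[] = {
	{ HID_USB_DEVICE(0x046d, 0xc077) },	/* example vendor/product */
	{ }
};
MODULE_DEVICE_TABLE(hid, example_ids);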
2751
2752const struct bus_type hid_bus_type = {
2753	.name		= "hid",
2754	.dev_groups	= hid_dev_groups,
2755	.drv_groups	= hid_drv_groups,
2756	.match		= hid_bus_match,
2757	.probe		= hid_device_probe,
2758	.remove		= hid_device_remove,
2759	.uevent		= hid_uevent,
2760};
2761EXPORT_SYMBOL(hid_bus_type);
2762
2763int hid_add_device(struct hid_device *hdev)
2764{
2765	static atomic_t id = ATOMIC_INIT(0);
2766	int ret;
2767
2768	if (WARN_ON(hdev->status & HID_STAT_ADDED))
2769		return -EBUSY;
2770
2771	hdev->quirks = hid_lookup_quirk(hdev);
2772
2773	/* we need to kill them here, otherwise they will stay allocated,
2774	 * waiting for a driver to come along */
2775	if (hid_ignore(hdev))
2776		return -ENODEV;
2777
2778	/*
2779	 * Check for the mandatory transport channel.
2780	 */
2781	if (!hdev->ll_driver->raw_request) {
2782		hid_err(hdev, "transport driver missing .raw_request()\n");
2783		return -EINVAL;
2784	}
2785
2786	/*
2787	 * Read the device report descriptor once and use it as a template
2788	 * for driver-specific modifications.
2789	 */
2790	ret = hdev->ll_driver->parse(hdev);
2791	if (ret)
2792		return ret;
2793	if (!hdev->dev_rdesc)
2794		return -ENODEV;
2795
2796	/*
2797	 * Scan generic devices for group information
2798	 */
2799	if (hid_ignore_special_drivers) {
2800		hdev->group = HID_GROUP_GENERIC;
2801	} else if (!hdev->group &&
2802		   !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) {
2803		ret = hid_scan_report(hdev);
2804		if (ret)
2805			hid_warn(hdev, "bad device descriptor (%d)\n", ret);
2806	}
2807
2808	hdev->id = atomic_inc_return(&id);
2809
2810	/* XXX hack; is there a cleaner solution once the driver core
2811	 * is converted to allow more than 20 bytes as the device name? */
2812	dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
2813		     hdev->vendor, hdev->product, hdev->id);
2814
2815	hid_debug_register(hdev, dev_name(&hdev->dev));
2816	ret = device_add(&hdev->dev);
2817	if (!ret)
2818		hdev->status |= HID_STAT_ADDED;
2819	else
2820		hid_debug_unregister(hdev);
2821
2822	return ret;
2823}
2824EXPORT_SYMBOL_GPL(hid_add_device);
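/*
 * Illustrative sketch (not part of hid-core.c): how a transport driver
 * typically registers a device.  example_register() is hypothetical; the
 * hid_ll_driver passed in must provide the callbacks hid_add_device()
 * relies on, in particular .parse() (expected to feed the descriptor to
 * hid_parse_report()) and the mandatory .raw_request().
 */
static int example_register(struct device *parent,
			    const struct hid_ll_driver *example_ll_driver,
			    u16 vendor, u16 product)
{
	struct hid_device *hdev;
	int ret;

	hdev = hid_allocate_device();
	if (IS_ERR(hdev))
		return PTR_ERR(hdev);

	hdev->ll_driver = example_ll_driver;
	hdev->dev.parent = parent;
	hdev->bus = BUS_VIRTUAL;
	hdev->vendor = vendor;
	hdev->product = product;
	snprintf(hdev->name, sizeof(hdev->name), "Example HID %04x:%04x",
		 vendor, product);

	ret = hid_add_device(hdev);
	if (ret) {
		hid_destroy_device(hdev);
		return ret;
	}

	return 0;
}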
2825
2826/**
2827 * hid_allocate_device - allocate new hid device descriptor
2828 *
2829	 * Allocate and initialize a hid device, so that hid_destroy_device()
2830	 * can later be used to free it.
2831	 *
2832	 * Return: a new hid_device pointer on success, otherwise an
2833	 * ERR_PTR-encoded error value.
2834 */
2835struct hid_device *hid_allocate_device(void)
2836{
2837	struct hid_device *hdev;
2838	int ret = -ENOMEM;
2839
2840	hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
2841	if (hdev == NULL)
2842		return ERR_PTR(ret);
2843
2844	device_initialize(&hdev->dev);
2845	hdev->dev.release = hid_device_release;
2846	hdev->dev.bus = &hid_bus_type;
2847	device_enable_async_suspend(&hdev->dev);
2848
2849	hid_close_report(hdev);
2850
2851	init_waitqueue_head(&hdev->debug_wait);
2852	INIT_LIST_HEAD(&hdev->debug_list);
2853	spin_lock_init(&hdev->debug_list_lock);
2854	sema_init(&hdev->driver_input_lock, 1);
2855	mutex_init(&hdev->ll_open_lock);
2856	kref_init(&hdev->ref);
2857
2858	hid_bpf_device_init(hdev);
2859
2860	return hdev;
2861}
2862EXPORT_SYMBOL_GPL(hid_allocate_device);
2863
2864static void hid_remove_device(struct hid_device *hdev)
2865{
2866	if (hdev->status & HID_STAT_ADDED) {
2867		device_del(&hdev->dev);
2868		hid_debug_unregister(hdev);
2869		hdev->status &= ~HID_STAT_ADDED;
2870	}
2871	kfree(hdev->dev_rdesc);
2872	hdev->dev_rdesc = NULL;
2873	hdev->dev_rsize = 0;
2874}
2875
2876/**
2877 * hid_destroy_device - free previously allocated device
2878 *
2879 * @hdev: hid device
2880 *
2881	 * If you allocated the hid_device through hid_allocate_device(), you
2882	 * must only free it with this function.
2883 */
2884void hid_destroy_device(struct hid_device *hdev)
2885{
2886	hid_bpf_destroy_device(hdev);
2887	hid_remove_device(hdev);
2888	put_device(&hdev->dev);
2889}
2890EXPORT_SYMBOL_GPL(hid_destroy_device);
2891
2892
2893static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
2894{
2895	struct hid_driver *hdrv = data;
2896	struct hid_device *hdev = to_hid_device(dev);
2897
2898	if (hdev->driver == hdrv &&
2899	    !hdrv->match(hdev, hid_ignore_special_drivers) &&
2900	    !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
2901		return device_reprobe(dev);
2902
2903	return 0;
2904}
2905
2906static int __hid_bus_driver_added(struct device_driver *drv, void *data)
2907{
2908	struct hid_driver *hdrv = to_hid_driver(drv);
2909
2910	if (hdrv->match) {
2911		bus_for_each_dev(&hid_bus_type, NULL, hdrv,
2912				 __hid_bus_reprobe_drivers);
2913	}
2914
2915	return 0;
2916}
2917
2918static int __bus_removed_driver(struct device_driver *drv, void *data)
2919{
2920	return bus_rescan_devices(&hid_bus_type);
2921}
2922
2923int __hid_register_driver(struct hid_driver *hdrv, struct module *owner,
2924		const char *mod_name)
2925{
2926	int ret;
2927
2928	hdrv->driver.name = hdrv->name;
2929	hdrv->driver.bus = &hid_bus_type;
2930	hdrv->driver.owner = owner;
2931	hdrv->driver.mod_name = mod_name;
2932
2933	INIT_LIST_HEAD(&hdrv->dyn_list);
2934	spin_lock_init(&hdrv->dyn_lock);
2935
2936	ret = driver_register(&hdrv->driver);
2937
2938	if (ret == 0)
2939		bus_for_each_drv(&hid_bus_type, NULL, NULL,
2940				 __hid_bus_driver_added);
2941
2942	return ret;
2943}
2944EXPORT_SYMBOL_GPL(__hid_register_driver);
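/*
 * Illustrative sketch (not part of hid-core.c): drivers rarely call
 * __hid_register_driver() directly.  They normally rely on the
 * module_hid_driver() helper, which wires hid_register_driver() and
 * hid_unregister_driver() into module init/exit.  example_driver reuses
 * the hypothetical example_ids, example_probe and example_remove from the
 * sketches above.
 */
static struct hid_driver example_driver = {
	.name		= "example-hid",
	.id_table	= example_ids,
	.probe		= example_probe,
	.remove		= example_remove,
};
module_hid_driver(example_driver);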
2945
2946void hid_unregister_driver(struct hid_driver *hdrv)
2947{
2948	driver_unregister(&hdrv->driver);
2949	hid_free_dynids(hdrv);
2950
2951	bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver);
2952}
2953EXPORT_SYMBOL_GPL(hid_unregister_driver);
2954
2955int hid_check_keys_pressed(struct hid_device *hid)
2956{
2957	struct hid_input *hidinput;
2958	int i;
2959
2960	if (!(hid->claimed & HID_CLAIMED_INPUT))
2961		return 0;
2962
2963	list_for_each_entry(hidinput, &hid->inputs, list) {
2964		for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++)
2965			if (hidinput->input->key[i])
2966				return 1;
2967	}
2968
2969	return 0;
2970}
2971EXPORT_SYMBOL_GPL(hid_check_keys_pressed);
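/*
 * Illustrative sketch (not part of hid-core.c): a transport driver can
 * consult hid_check_keys_pressed() in its suspend path, e.g. to postpone
 * suspend while a key is still held down.  example_suspend() is
 * hypothetical and not modelled on any particular transport.
 */
static int example_suspend(struct hid_device *hdev)
{
	if (hid_check_keys_pressed(hdev))
		return -EBUSY;	/* a key is down, try again later */

	return 0;
}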
2972
2973#ifdef CONFIG_HID_BPF
2974static struct hid_bpf_ops hid_ops = {
2975	.hid_get_report = hid_get_report,
2976	.hid_hw_raw_request = hid_hw_raw_request,
2977	.owner = THIS_MODULE,
2978	.bus_type = &hid_bus_type,
2979};
2980#endif
2981
2982static int __init hid_init(void)
2983{
2984	int ret;
2985
2986	ret = bus_register(&hid_bus_type);
2987	if (ret) {
2988		pr_err("can't register hid bus\n");
2989		goto err;
2990	}
2991
2992#ifdef CONFIG_HID_BPF
2993	hid_bpf_ops = &hid_ops;
2994#endif
2995
2996	ret = hidraw_init();
2997	if (ret)
2998		goto err_bus;
2999
3000	hid_debug_init();
3001
3002	return 0;
3003err_bus:
3004	bus_unregister(&hid_bus_type);
3005err:
3006	return ret;
3007}
3008
3009static void __exit hid_exit(void)
3010{
3011#ifdef CONFIG_HID_BPF
3012	hid_bpf_ops = NULL;
3013#endif
3014	hid_debug_exit();
3015	hidraw_exit();
3016	bus_unregister(&hid_bus_type);
3017	hid_quirks_exit(HID_BUS_ANY);
3018}
3019
3020module_init(hid_init);
3021module_exit(hid_exit);
3022
3023MODULE_AUTHOR("Andreas Gal");
3024MODULE_AUTHOR("Vojtech Pavlik");
3025MODULE_AUTHOR("Jiri Kosina");
3026MODULE_LICENSE("GPL");