1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * HID support for Linux 4 * 5 * Copyright (c) 1999 Andreas Gal 6 * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> 7 * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc 8 * Copyright (c) 2006-2012 Jiri Kosina 9 */ 10 11 /* 12 */ 13 14 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 15 16 #include <linux/module.h> 17 #include <linux/slab.h> 18 #include <linux/init.h> 19 #include <linux/kernel.h> 20 #include <linux/list.h> 21 #include <linux/mm.h> 22 #include <linux/spinlock.h> 23 #include <asm/unaligned.h> 24 #include <asm/byteorder.h> 25 #include <linux/input.h> 26 #include <linux/wait.h> 27 #include <linux/vmalloc.h> 28 #include <linux/sched.h> 29 #include <linux/semaphore.h> 30 31 #include <linux/hid.h> 32 #include <linux/hiddev.h> 33 #include <linux/hid-debug.h> 34 #include <linux/hidraw.h> 35 36 #include "hid-ids.h" 37 38 /* 39 * Version Information 40 */ 41 42 #define DRIVER_DESC "HID core driver" 43 44 int hid_debug = 0; 45 module_param_named(debug, hid_debug, int, 0600); 46 MODULE_PARM_DESC(debug, "toggle HID debugging messages"); 47 EXPORT_SYMBOL_GPL(hid_debug); 48 49 static int hid_ignore_special_drivers = 0; 50 module_param_named(ignore_special_drivers, hid_ignore_special_drivers, int, 0600); 51 MODULE_PARM_DESC(ignore_special_drivers, "Ignore any special drivers and handle all devices by generic driver"); 52 53 /* 54 * Register a new report for a device. 55 */ 56 57 struct hid_report *hid_register_report(struct hid_device *device, 58 enum hid_report_type type, unsigned int id, 59 unsigned int application) 60 { 61 struct hid_report_enum *report_enum = device->report_enum + type; 62 struct hid_report *report; 63 64 if (id >= HID_MAX_IDS) 65 return NULL; 66 if (report_enum->report_id_hash[id]) 67 return report_enum->report_id_hash[id]; 68 69 report = kzalloc(sizeof(struct hid_report), GFP_KERNEL); 70 if (!report) 71 return NULL; 72 73 if (id != 0) 74 report_enum->numbered = 1; 75 76 report->id = id; 77 report->type = type; 78 report->size = 0; 79 report->device = device; 80 report->application = application; 81 report_enum->report_id_hash[id] = report; 82 83 list_add_tail(&report->list, &report_enum->report_list); 84 INIT_LIST_HEAD(&report->field_entry_list); 85 86 return report; 87 } 88 EXPORT_SYMBOL_GPL(hid_register_report); 89 90 /* 91 * Register a new field for this report. 92 */ 93 94 static struct hid_field *hid_register_field(struct hid_report *report, unsigned usages) 95 { 96 struct hid_field *field; 97 98 if (report->maxfield == HID_MAX_FIELDS) { 99 hid_err(report->device, "too many fields in report\n"); 100 return NULL; 101 } 102 103 field = kzalloc((sizeof(struct hid_field) + 104 usages * sizeof(struct hid_usage) + 105 3 * usages * sizeof(unsigned int)), GFP_KERNEL); 106 if (!field) 107 return NULL; 108 109 field->index = report->maxfield++; 110 report->field[field->index] = field; 111 field->usage = (struct hid_usage *)(field + 1); 112 field->value = (s32 *)(field->usage + usages); 113 field->new_value = (s32 *)(field->value + usages); 114 field->usages_priorities = (s32 *)(field->new_value + usages); 115 field->report = report; 116 117 return field; 118 } 119 120 /* 121 * Open a collection. The type/usage is pushed on the stack. 
122 */ 123 124 static int open_collection(struct hid_parser *parser, unsigned type) 125 { 126 struct hid_collection *collection; 127 unsigned usage; 128 int collection_index; 129 130 usage = parser->local.usage[0]; 131 132 if (parser->collection_stack_ptr == parser->collection_stack_size) { 133 unsigned int *collection_stack; 134 unsigned int new_size = parser->collection_stack_size + 135 HID_COLLECTION_STACK_SIZE; 136 137 collection_stack = krealloc(parser->collection_stack, 138 new_size * sizeof(unsigned int), 139 GFP_KERNEL); 140 if (!collection_stack) 141 return -ENOMEM; 142 143 parser->collection_stack = collection_stack; 144 parser->collection_stack_size = new_size; 145 } 146 147 if (parser->device->maxcollection == parser->device->collection_size) { 148 collection = kmalloc( 149 array3_size(sizeof(struct hid_collection), 150 parser->device->collection_size, 151 2), 152 GFP_KERNEL); 153 if (collection == NULL) { 154 hid_err(parser->device, "failed to reallocate collection array\n"); 155 return -ENOMEM; 156 } 157 memcpy(collection, parser->device->collection, 158 sizeof(struct hid_collection) * 159 parser->device->collection_size); 160 memset(collection + parser->device->collection_size, 0, 161 sizeof(struct hid_collection) * 162 parser->device->collection_size); 163 kfree(parser->device->collection); 164 parser->device->collection = collection; 165 parser->device->collection_size *= 2; 166 } 167 168 parser->collection_stack[parser->collection_stack_ptr++] = 169 parser->device->maxcollection; 170 171 collection_index = parser->device->maxcollection++; 172 collection = parser->device->collection + collection_index; 173 collection->type = type; 174 collection->usage = usage; 175 collection->level = parser->collection_stack_ptr - 1; 176 collection->parent_idx = (collection->level == 0) ? -1 : 177 parser->collection_stack[collection->level - 1]; 178 179 if (type == HID_COLLECTION_APPLICATION) 180 parser->device->maxapplication++; 181 182 return 0; 183 } 184 185 /* 186 * Close a collection. 187 */ 188 189 static int close_collection(struct hid_parser *parser) 190 { 191 if (!parser->collection_stack_ptr) { 192 hid_err(parser->device, "collection stack underflow\n"); 193 return -EINVAL; 194 } 195 parser->collection_stack_ptr--; 196 return 0; 197 } 198 199 /* 200 * Climb up the stack, search for the specified collection type 201 * and return the usage. 202 */ 203 204 static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type) 205 { 206 struct hid_collection *collection = parser->device->collection; 207 int n; 208 209 for (n = parser->collection_stack_ptr - 1; n >= 0; n--) { 210 unsigned index = parser->collection_stack[n]; 211 if (collection[index].type == type) 212 return collection[index].usage; 213 } 214 return 0; /* we know nothing about this usage type */ 215 } 216 217 /* 218 * Concatenate usage which defines 16 bits or less with the 219 * currently defined usage page to form a 32 bit usage 220 */ 221 222 static void complete_usage(struct hid_parser *parser, unsigned int index) 223 { 224 parser->local.usage[index] &= 0xFFFF; 225 parser->local.usage[index] |= 226 (parser->global.usage_page & 0xFFFF) << 16; 227 } 228 229 /* 230 * Add a usage to the temporary parser table. 
231 */ 232 233 static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size) 234 { 235 if (parser->local.usage_index >= HID_MAX_USAGES) { 236 hid_err(parser->device, "usage index exceeded\n"); 237 return -1; 238 } 239 parser->local.usage[parser->local.usage_index] = usage; 240 241 /* 242 * If Usage item only includes usage id, concatenate it with 243 * currently defined usage page 244 */ 245 if (size <= 2) 246 complete_usage(parser, parser->local.usage_index); 247 248 parser->local.usage_size[parser->local.usage_index] = size; 249 parser->local.collection_index[parser->local.usage_index] = 250 parser->collection_stack_ptr ? 251 parser->collection_stack[parser->collection_stack_ptr - 1] : 0; 252 parser->local.usage_index++; 253 return 0; 254 } 255 256 /* 257 * Register a new field for this report. 258 */ 259 260 static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsigned flags) 261 { 262 struct hid_report *report; 263 struct hid_field *field; 264 unsigned int usages; 265 unsigned int offset; 266 unsigned int i; 267 unsigned int application; 268 269 application = hid_lookup_collection(parser, HID_COLLECTION_APPLICATION); 270 271 report = hid_register_report(parser->device, report_type, 272 parser->global.report_id, application); 273 if (!report) { 274 hid_err(parser->device, "hid_register_report failed\n"); 275 return -1; 276 } 277 278 /* Handle both signed and unsigned cases properly */ 279 if ((parser->global.logical_minimum < 0 && 280 parser->global.logical_maximum < 281 parser->global.logical_minimum) || 282 (parser->global.logical_minimum >= 0 && 283 (__u32)parser->global.logical_maximum < 284 (__u32)parser->global.logical_minimum)) { 285 dbg_hid("logical range invalid 0x%x 0x%x\n", 286 parser->global.logical_minimum, 287 parser->global.logical_maximum); 288 return -1; 289 } 290 291 offset = report->size; 292 report->size += parser->global.report_size * parser->global.report_count; 293 294 /* Total size check: Allow for possible report index byte */ 295 if (report->size > (HID_MAX_BUFFER_SIZE - 1) << 3) { 296 hid_err(parser->device, "report is too long\n"); 297 return -1; 298 } 299 300 if (!parser->local.usage_index) /* Ignore padding fields */ 301 return 0; 302 303 usages = max_t(unsigned, parser->local.usage_index, 304 parser->global.report_count); 305 306 field = hid_register_field(report, usages); 307 if (!field) 308 return 0; 309 310 field->physical = hid_lookup_collection(parser, HID_COLLECTION_PHYSICAL); 311 field->logical = hid_lookup_collection(parser, HID_COLLECTION_LOGICAL); 312 field->application = application; 313 314 for (i = 0; i < usages; i++) { 315 unsigned j = i; 316 /* Duplicate the last usage we parsed if we have excess values */ 317 if (i >= parser->local.usage_index) 318 j = parser->local.usage_index - 1; 319 field->usage[i].hid = parser->local.usage[j]; 320 field->usage[i].collection_index = 321 parser->local.collection_index[j]; 322 field->usage[i].usage_index = i; 323 field->usage[i].resolution_multiplier = 1; 324 } 325 326 field->maxusage = usages; 327 field->flags = flags; 328 field->report_offset = offset; 329 field->report_type = report_type; 330 field->report_size = parser->global.report_size; 331 field->report_count = parser->global.report_count; 332 field->logical_minimum = parser->global.logical_minimum; 333 field->logical_maximum = parser->global.logical_maximum; 334 field->physical_minimum = parser->global.physical_minimum; 335 field->physical_maximum = parser->global.physical_maximum; 336 field->unit_exponent = 
parser->global.unit_exponent; 337 field->unit = parser->global.unit; 338 339 return 0; 340 } 341 342 /* 343 * Read data value from item. 344 */ 345 346 static u32 item_udata(struct hid_item *item) 347 { 348 switch (item->size) { 349 case 1: return item->data.u8; 350 case 2: return item->data.u16; 351 case 4: return item->data.u32; 352 } 353 return 0; 354 } 355 356 static s32 item_sdata(struct hid_item *item) 357 { 358 switch (item->size) { 359 case 1: return item->data.s8; 360 case 2: return item->data.s16; 361 case 4: return item->data.s32; 362 } 363 return 0; 364 } 365 366 /* 367 * Process a global item. 368 */ 369 370 static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) 371 { 372 __s32 raw_value; 373 switch (item->tag) { 374 case HID_GLOBAL_ITEM_TAG_PUSH: 375 376 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { 377 hid_err(parser->device, "global environment stack overflow\n"); 378 return -1; 379 } 380 381 memcpy(parser->global_stack + parser->global_stack_ptr++, 382 &parser->global, sizeof(struct hid_global)); 383 return 0; 384 385 case HID_GLOBAL_ITEM_TAG_POP: 386 387 if (!parser->global_stack_ptr) { 388 hid_err(parser->device, "global environment stack underflow\n"); 389 return -1; 390 } 391 392 memcpy(&parser->global, parser->global_stack + 393 --parser->global_stack_ptr, sizeof(struct hid_global)); 394 return 0; 395 396 case HID_GLOBAL_ITEM_TAG_USAGE_PAGE: 397 parser->global.usage_page = item_udata(item); 398 return 0; 399 400 case HID_GLOBAL_ITEM_TAG_LOGICAL_MINIMUM: 401 parser->global.logical_minimum = item_sdata(item); 402 return 0; 403 404 case HID_GLOBAL_ITEM_TAG_LOGICAL_MAXIMUM: 405 if (parser->global.logical_minimum < 0) 406 parser->global.logical_maximum = item_sdata(item); 407 else 408 parser->global.logical_maximum = item_udata(item); 409 return 0; 410 411 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MINIMUM: 412 parser->global.physical_minimum = item_sdata(item); 413 return 0; 414 415 case HID_GLOBAL_ITEM_TAG_PHYSICAL_MAXIMUM: 416 if (parser->global.physical_minimum < 0) 417 parser->global.physical_maximum = item_sdata(item); 418 else 419 parser->global.physical_maximum = item_udata(item); 420 return 0; 421 422 case HID_GLOBAL_ITEM_TAG_UNIT_EXPONENT: 423 /* Many devices provide unit exponent as a two's complement 424 * nibble due to the common misunderstanding of HID 425 * specification 1.11, 6.2.2.7 Global Items. Attempt to handle 426 * both this and the standard encoding. 
*/ 427 raw_value = item_sdata(item); 428 if (!(raw_value & 0xfffffff0)) 429 parser->global.unit_exponent = hid_snto32(raw_value, 4); 430 else 431 parser->global.unit_exponent = raw_value; 432 return 0; 433 434 case HID_GLOBAL_ITEM_TAG_UNIT: 435 parser->global.unit = item_udata(item); 436 return 0; 437 438 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: 439 parser->global.report_size = item_udata(item); 440 if (parser->global.report_size > 256) { 441 hid_err(parser->device, "invalid report_size %d\n", 442 parser->global.report_size); 443 return -1; 444 } 445 return 0; 446 447 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT: 448 parser->global.report_count = item_udata(item); 449 if (parser->global.report_count > HID_MAX_USAGES) { 450 hid_err(parser->device, "invalid report_count %d\n", 451 parser->global.report_count); 452 return -1; 453 } 454 return 0; 455 456 case HID_GLOBAL_ITEM_TAG_REPORT_ID: 457 parser->global.report_id = item_udata(item); 458 if (parser->global.report_id == 0 || 459 parser->global.report_id >= HID_MAX_IDS) { 460 hid_err(parser->device, "report_id %u is invalid\n", 461 parser->global.report_id); 462 return -1; 463 } 464 return 0; 465 466 default: 467 hid_err(parser->device, "unknown global tag 0x%x\n", item->tag); 468 return -1; 469 } 470 } 471 472 /* 473 * Process a local item. 474 */ 475 476 static int hid_parser_local(struct hid_parser *parser, struct hid_item *item) 477 { 478 __u32 data; 479 unsigned n; 480 __u32 count; 481 482 data = item_udata(item); 483 484 switch (item->tag) { 485 case HID_LOCAL_ITEM_TAG_DELIMITER: 486 487 if (data) { 488 /* 489 * We treat items before the first delimiter 490 * as global to all usage sets (branch 0). 491 * In the moment we process only these global 492 * items and the first delimiter set. 493 */ 494 if (parser->local.delimiter_depth != 0) { 495 hid_err(parser->device, "nested delimiters\n"); 496 return -1; 497 } 498 parser->local.delimiter_depth++; 499 parser->local.delimiter_branch++; 500 } else { 501 if (parser->local.delimiter_depth < 1) { 502 hid_err(parser->device, "bogus close delimiter\n"); 503 return -1; 504 } 505 parser->local.delimiter_depth--; 506 } 507 return 0; 508 509 case HID_LOCAL_ITEM_TAG_USAGE: 510 511 if (parser->local.delimiter_branch > 1) { 512 dbg_hid("alternative usage ignored\n"); 513 return 0; 514 } 515 516 return hid_add_usage(parser, data, item->size); 517 518 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: 519 520 if (parser->local.delimiter_branch > 1) { 521 dbg_hid("alternative usage ignored\n"); 522 return 0; 523 } 524 525 parser->local.usage_minimum = data; 526 return 0; 527 528 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM: 529 530 if (parser->local.delimiter_branch > 1) { 531 dbg_hid("alternative usage ignored\n"); 532 return 0; 533 } 534 535 count = data - parser->local.usage_minimum; 536 if (count + parser->local.usage_index >= HID_MAX_USAGES) { 537 /* 538 * We do not warn if the name is not set, we are 539 * actually pre-scanning the device. 
540 */ 541 if (dev_name(&parser->device->dev)) 542 hid_warn(parser->device, 543 "ignoring exceeding usage max\n"); 544 data = HID_MAX_USAGES - parser->local.usage_index + 545 parser->local.usage_minimum - 1; 546 if (data <= 0) { 547 hid_err(parser->device, 548 "no more usage index available\n"); 549 return -1; 550 } 551 } 552 553 for (n = parser->local.usage_minimum; n <= data; n++) 554 if (hid_add_usage(parser, n, item->size)) { 555 dbg_hid("hid_add_usage failed\n"); 556 return -1; 557 } 558 return 0; 559 560 default: 561 562 dbg_hid("unknown local item tag 0x%x\n", item->tag); 563 return 0; 564 } 565 return 0; 566 } 567 568 /* 569 * Concatenate Usage Pages into Usages where relevant: 570 * As per specification, 6.2.2.8: "When the parser encounters a main item it 571 * concatenates the last declared Usage Page with a Usage to form a complete 572 * usage value." 573 */ 574 575 static void hid_concatenate_last_usage_page(struct hid_parser *parser) 576 { 577 int i; 578 unsigned int usage_page; 579 unsigned int current_page; 580 581 if (!parser->local.usage_index) 582 return; 583 584 usage_page = parser->global.usage_page; 585 586 /* 587 * Concatenate usage page again only if last declared Usage Page 588 * has not been already used in previous usages concatenation 589 */ 590 for (i = parser->local.usage_index - 1; i >= 0; i--) { 591 if (parser->local.usage_size[i] > 2) 592 /* Ignore extended usages */ 593 continue; 594 595 current_page = parser->local.usage[i] >> 16; 596 if (current_page == usage_page) 597 break; 598 599 complete_usage(parser, i); 600 } 601 } 602 603 /* 604 * Process a main item. 605 */ 606 607 static int hid_parser_main(struct hid_parser *parser, struct hid_item *item) 608 { 609 __u32 data; 610 int ret; 611 612 hid_concatenate_last_usage_page(parser); 613 614 data = item_udata(item); 615 616 switch (item->tag) { 617 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 618 ret = open_collection(parser, data & 0xff); 619 break; 620 case HID_MAIN_ITEM_TAG_END_COLLECTION: 621 ret = close_collection(parser); 622 break; 623 case HID_MAIN_ITEM_TAG_INPUT: 624 ret = hid_add_field(parser, HID_INPUT_REPORT, data); 625 break; 626 case HID_MAIN_ITEM_TAG_OUTPUT: 627 ret = hid_add_field(parser, HID_OUTPUT_REPORT, data); 628 break; 629 case HID_MAIN_ITEM_TAG_FEATURE: 630 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 631 break; 632 default: 633 hid_warn(parser->device, "unknown main item tag 0x%x\n", item->tag); 634 ret = 0; 635 } 636 637 memset(&parser->local, 0, sizeof(parser->local)); /* Reset the local parser environment */ 638 639 return ret; 640 } 641 642 /* 643 * Process a reserved item. 644 */ 645 646 static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item) 647 { 648 dbg_hid("reserved item type, tag 0x%x\n", item->tag); 649 return 0; 650 } 651 652 /* 653 * Free a report and all registered fields. The field->usage and 654 * field->value table's are allocated behind the field, so we need 655 * only to free(field) itself. 656 */ 657 658 static void hid_free_report(struct hid_report *report) 659 { 660 unsigned n; 661 662 kfree(report->field_entries); 663 664 for (n = 0; n < report->maxfield; n++) 665 kfree(report->field[n]); 666 kfree(report); 667 } 668 669 /* 670 * Close report. This function returns the device 671 * state to the point prior to hid_open_report(). 
672 */ 673 static void hid_close_report(struct hid_device *device) 674 { 675 unsigned i, j; 676 677 for (i = 0; i < HID_REPORT_TYPES; i++) { 678 struct hid_report_enum *report_enum = device->report_enum + i; 679 680 for (j = 0; j < HID_MAX_IDS; j++) { 681 struct hid_report *report = report_enum->report_id_hash[j]; 682 if (report) 683 hid_free_report(report); 684 } 685 memset(report_enum, 0, sizeof(*report_enum)); 686 INIT_LIST_HEAD(&report_enum->report_list); 687 } 688 689 kfree(device->rdesc); 690 device->rdesc = NULL; 691 device->rsize = 0; 692 693 kfree(device->collection); 694 device->collection = NULL; 695 device->collection_size = 0; 696 device->maxcollection = 0; 697 device->maxapplication = 0; 698 699 device->status &= ~HID_STAT_PARSED; 700 } 701 702 /* 703 * Free a device structure, all reports, and all fields. 704 */ 705 706 static void hid_device_release(struct device *dev) 707 { 708 struct hid_device *hid = to_hid_device(dev); 709 710 hid_close_report(hid); 711 kfree(hid->dev_rdesc); 712 kfree(hid); 713 } 714 715 /* 716 * Fetch a report description item from the data stream. We support long 717 * items, though they are not used yet. 718 */ 719 720 static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) 721 { 722 u8 b; 723 724 if ((end - start) <= 0) 725 return NULL; 726 727 b = *start++; 728 729 item->type = (b >> 2) & 3; 730 item->tag = (b >> 4) & 15; 731 732 if (item->tag == HID_ITEM_TAG_LONG) { 733 734 item->format = HID_ITEM_FORMAT_LONG; 735 736 if ((end - start) < 2) 737 return NULL; 738 739 item->size = *start++; 740 item->tag = *start++; 741 742 if ((end - start) < item->size) 743 return NULL; 744 745 item->data.longdata = start; 746 start += item->size; 747 return start; 748 } 749 750 item->format = HID_ITEM_FORMAT_SHORT; 751 item->size = b & 3; 752 753 switch (item->size) { 754 case 0: 755 return start; 756 757 case 1: 758 if ((end - start) < 1) 759 return NULL; 760 item->data.u8 = *start++; 761 return start; 762 763 case 2: 764 if ((end - start) < 2) 765 return NULL; 766 item->data.u16 = get_unaligned_le16(start); 767 start = (__u8 *)((__le16 *)start + 1); 768 return start; 769 770 case 3: 771 item->size++; 772 if ((end - start) < 4) 773 return NULL; 774 item->data.u32 = get_unaligned_le32(start); 775 start = (__u8 *)((__le32 *)start + 1); 776 return start; 777 } 778 779 return NULL; 780 } 781 782 static void hid_scan_input_usage(struct hid_parser *parser, u32 usage) 783 { 784 struct hid_device *hid = parser->device; 785 786 if (usage == HID_DG_CONTACTID) 787 hid->group = HID_GROUP_MULTITOUCH; 788 } 789 790 static void hid_scan_feature_usage(struct hid_parser *parser, u32 usage) 791 { 792 if (usage == 0xff0000c5 && parser->global.report_count == 256 && 793 parser->global.report_size == 8) 794 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 795 796 if (usage == 0xff0000c6 && parser->global.report_count == 1 && 797 parser->global.report_size == 8) 798 parser->scan_flags |= HID_SCAN_FLAG_MT_WIN_8; 799 } 800 801 static void hid_scan_collection(struct hid_parser *parser, unsigned type) 802 { 803 struct hid_device *hid = parser->device; 804 int i; 805 806 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && 807 type == HID_COLLECTION_PHYSICAL) 808 hid->group = HID_GROUP_SENSOR_HUB; 809 810 if (hid->vendor == USB_VENDOR_ID_MICROSOFT && 811 hid->product == USB_DEVICE_ID_MS_POWER_COVER && 812 hid->group == HID_GROUP_MULTITOUCH) 813 hid->group = HID_GROUP_GENERIC; 814 815 if ((parser->global.usage_page << 16) == HID_UP_GENDESK) 816 for (i = 0; i < 
parser->local.usage_index; i++) 817 if (parser->local.usage[i] == HID_GD_POINTER) 818 parser->scan_flags |= HID_SCAN_FLAG_GD_POINTER; 819 820 if ((parser->global.usage_page << 16) >= HID_UP_MSVENDOR) 821 parser->scan_flags |= HID_SCAN_FLAG_VENDOR_SPECIFIC; 822 823 if ((parser->global.usage_page << 16) == HID_UP_GOOGLEVENDOR) 824 for (i = 0; i < parser->local.usage_index; i++) 825 if (parser->local.usage[i] == 826 (HID_UP_GOOGLEVENDOR | 0x0001)) 827 parser->device->group = 828 HID_GROUP_VIVALDI; 829 } 830 831 static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) 832 { 833 __u32 data; 834 int i; 835 836 hid_concatenate_last_usage_page(parser); 837 838 data = item_udata(item); 839 840 switch (item->tag) { 841 case HID_MAIN_ITEM_TAG_BEGIN_COLLECTION: 842 hid_scan_collection(parser, data & 0xff); 843 break; 844 case HID_MAIN_ITEM_TAG_END_COLLECTION: 845 break; 846 case HID_MAIN_ITEM_TAG_INPUT: 847 /* ignore constant inputs, they will be ignored by hid-input */ 848 if (data & HID_MAIN_ITEM_CONSTANT) 849 break; 850 for (i = 0; i < parser->local.usage_index; i++) 851 hid_scan_input_usage(parser, parser->local.usage[i]); 852 break; 853 case HID_MAIN_ITEM_TAG_OUTPUT: 854 break; 855 case HID_MAIN_ITEM_TAG_FEATURE: 856 for (i = 0; i < parser->local.usage_index; i++) 857 hid_scan_feature_usage(parser, parser->local.usage[i]); 858 break; 859 } 860 861 /* Reset the local parser environment */ 862 memset(&parser->local, 0, sizeof(parser->local)); 863 864 return 0; 865 } 866 867 /* 868 * Scan a report descriptor before the device is added to the bus. 869 * Sets device groups and other properties that determine what driver 870 * to load. 871 */ 872 static int hid_scan_report(struct hid_device *hid) 873 { 874 struct hid_parser *parser; 875 struct hid_item item; 876 __u8 *start = hid->dev_rdesc; 877 __u8 *end = start + hid->dev_rsize; 878 static int (*dispatch_type[])(struct hid_parser *parser, 879 struct hid_item *item) = { 880 hid_scan_main, 881 hid_parser_global, 882 hid_parser_local, 883 hid_parser_reserved 884 }; 885 886 parser = vzalloc(sizeof(struct hid_parser)); 887 if (!parser) 888 return -ENOMEM; 889 890 parser->device = hid; 891 hid->group = HID_GROUP_GENERIC; 892 893 /* 894 * The parsing is simpler than the one in hid_open_report() as we should 895 * be robust against hid errors. Those errors will be raised by 896 * hid_open_report() anyway. 897 */ 898 while ((start = fetch_item(start, end, &item)) != NULL) 899 dispatch_type[item.type](parser, &item); 900 901 /* 902 * Handle special flags set during scanning. 903 */ 904 if ((parser->scan_flags & HID_SCAN_FLAG_MT_WIN_8) && 905 (hid->group == HID_GROUP_MULTITOUCH)) 906 hid->group = HID_GROUP_MULTITOUCH_WIN_8; 907 908 /* 909 * Vendor specific handlings 910 */ 911 switch (hid->vendor) { 912 case USB_VENDOR_ID_WACOM: 913 hid->group = HID_GROUP_WACOM; 914 break; 915 case USB_VENDOR_ID_SYNAPTICS: 916 if (hid->group == HID_GROUP_GENERIC) 917 if ((parser->scan_flags & HID_SCAN_FLAG_VENDOR_SPECIFIC) 918 && (parser->scan_flags & HID_SCAN_FLAG_GD_POINTER)) 919 /* 920 * hid-rmi should take care of them, 921 * not hid-generic 922 */ 923 hid->group = HID_GROUP_RMI; 924 break; 925 } 926 927 kfree(parser->collection_stack); 928 vfree(parser); 929 return 0; 930 } 931 932 /** 933 * hid_parse_report - parse device report 934 * 935 * @hid: hid device 936 * @start: report start 937 * @size: report size 938 * 939 * Allocate the device report as read by the bus driver. This function should 940 * only be called from parse() in ll drivers. 
941 */ 942 int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size) 943 { 944 hid->dev_rdesc = kmemdup(start, size, GFP_KERNEL); 945 if (!hid->dev_rdesc) 946 return -ENOMEM; 947 hid->dev_rsize = size; 948 return 0; 949 } 950 EXPORT_SYMBOL_GPL(hid_parse_report); 951 952 static const char * const hid_report_names[] = { 953 "HID_INPUT_REPORT", 954 "HID_OUTPUT_REPORT", 955 "HID_FEATURE_REPORT", 956 }; 957 /** 958 * hid_validate_values - validate existing device report's value indexes 959 * 960 * @hid: hid device 961 * @type: which report type to examine 962 * @id: which report ID to examine (0 for first) 963 * @field_index: which report field to examine 964 * @report_counts: expected number of values 965 * 966 * Validate the number of values in a given field of a given report, after 967 * parsing. 968 */ 969 struct hid_report *hid_validate_values(struct hid_device *hid, 970 enum hid_report_type type, unsigned int id, 971 unsigned int field_index, 972 unsigned int report_counts) 973 { 974 struct hid_report *report; 975 976 if (type > HID_FEATURE_REPORT) { 977 hid_err(hid, "invalid HID report type %u\n", type); 978 return NULL; 979 } 980 981 if (id >= HID_MAX_IDS) { 982 hid_err(hid, "invalid HID report id %u\n", id); 983 return NULL; 984 } 985 986 /* 987 * Explicitly not using hid_get_report() here since it depends on 988 * ->numbered being checked, which may not always be the case when 989 * drivers go to access report values. 990 */ 991 if (id == 0) { 992 /* 993 * Validating on id 0 means we should examine the first 994 * report in the list. 995 */ 996 report = list_first_entry_or_null( 997 &hid->report_enum[type].report_list, 998 struct hid_report, list); 999 } else { 1000 report = hid->report_enum[type].report_id_hash[id]; 1001 } 1002 if (!report) { 1003 hid_err(hid, "missing %s %u\n", hid_report_names[type], id); 1004 return NULL; 1005 } 1006 if (report->maxfield <= field_index) { 1007 hid_err(hid, "not enough fields in %s %u\n", 1008 hid_report_names[type], id); 1009 return NULL; 1010 } 1011 if (report->field[field_index]->report_count < report_counts) { 1012 hid_err(hid, "not enough values in %s %u field %u\n", 1013 hid_report_names[type], id, field_index); 1014 return NULL; 1015 } 1016 return report; 1017 } 1018 EXPORT_SYMBOL_GPL(hid_validate_values); 1019 1020 static int hid_calculate_multiplier(struct hid_device *hid, 1021 struct hid_field *multiplier) 1022 { 1023 int m; 1024 __s32 v = *multiplier->value; 1025 __s32 lmin = multiplier->logical_minimum; 1026 __s32 lmax = multiplier->logical_maximum; 1027 __s32 pmin = multiplier->physical_minimum; 1028 __s32 pmax = multiplier->physical_maximum; 1029 1030 /* 1031 * "Because OS implementations will generally divide the control's 1032 * reported count by the Effective Resolution Multiplier, designers 1033 * should take care not to establish a potential Effective 1034 * Resolution Multiplier of zero." 1035 * HID Usage Table, v1.12, Section 4.3.1, p31 1036 */ 1037 if (lmax - lmin == 0) 1038 return 1; 1039 /* 1040 * Handling the unit exponent is left as an exercise to whoever 1041 * finds a device where that exponent is not 0. 
1042 */ 1043 m = ((v - lmin)/(lmax - lmin) * (pmax - pmin) + pmin); 1044 if (unlikely(multiplier->unit_exponent != 0)) { 1045 hid_warn(hid, 1046 "unsupported Resolution Multiplier unit exponent %d\n", 1047 multiplier->unit_exponent); 1048 } 1049 1050 /* There are no devices with an effective multiplier > 255 */ 1051 if (unlikely(m == 0 || m > 255 || m < -255)) { 1052 hid_warn(hid, "unsupported Resolution Multiplier %d\n", m); 1053 m = 1; 1054 } 1055 1056 return m; 1057 } 1058 1059 static void hid_apply_multiplier_to_field(struct hid_device *hid, 1060 struct hid_field *field, 1061 struct hid_collection *multiplier_collection, 1062 int effective_multiplier) 1063 { 1064 struct hid_collection *collection; 1065 struct hid_usage *usage; 1066 int i; 1067 1068 /* 1069 * If multiplier_collection is NULL, the multiplier applies 1070 * to all fields in the report. 1071 * Otherwise, it is the Logical Collection the multiplier applies to 1072 * but our field may be in a subcollection of that collection. 1073 */ 1074 for (i = 0; i < field->maxusage; i++) { 1075 usage = &field->usage[i]; 1076 1077 collection = &hid->collection[usage->collection_index]; 1078 while (collection->parent_idx != -1 && 1079 collection != multiplier_collection) 1080 collection = &hid->collection[collection->parent_idx]; 1081 1082 if (collection->parent_idx != -1 || 1083 multiplier_collection == NULL) 1084 usage->resolution_multiplier = effective_multiplier; 1085 1086 } 1087 } 1088 1089 static void hid_apply_multiplier(struct hid_device *hid, 1090 struct hid_field *multiplier) 1091 { 1092 struct hid_report_enum *rep_enum; 1093 struct hid_report *rep; 1094 struct hid_field *field; 1095 struct hid_collection *multiplier_collection; 1096 int effective_multiplier; 1097 int i; 1098 1099 /* 1100 * "The Resolution Multiplier control must be contained in the same 1101 * Logical Collection as the control(s) to which it is to be applied. 1102 * If no Resolution Multiplier is defined, then the Resolution 1103 * Multiplier defaults to 1. If more than one control exists in a 1104 * Logical Collection, the Resolution Multiplier is associated with 1105 * all controls in the collection. If no Logical Collection is 1106 * defined, the Resolution Multiplier is associated with all 1107 * controls in the report." 1108 * HID Usage Table, v1.12, Section 4.3.1, p30 1109 * 1110 * Thus, search from the current collection upwards until we find a 1111 * logical collection. Then search all fields for that same parent 1112 * collection. Those are the fields the multiplier applies to. 1113 * 1114 * If we have more than one multiplier, it will overwrite the 1115 * applicable fields later. 
1116 */ 1117 multiplier_collection = &hid->collection[multiplier->usage->collection_index]; 1118 while (multiplier_collection->parent_idx != -1 && 1119 multiplier_collection->type != HID_COLLECTION_LOGICAL) 1120 multiplier_collection = &hid->collection[multiplier_collection->parent_idx]; 1121 1122 effective_multiplier = hid_calculate_multiplier(hid, multiplier); 1123 1124 rep_enum = &hid->report_enum[HID_INPUT_REPORT]; 1125 list_for_each_entry(rep, &rep_enum->report_list, list) { 1126 for (i = 0; i < rep->maxfield; i++) { 1127 field = rep->field[i]; 1128 hid_apply_multiplier_to_field(hid, field, 1129 multiplier_collection, 1130 effective_multiplier); 1131 } 1132 } 1133 } 1134 1135 /* 1136 * hid_setup_resolution_multiplier - set up all resolution multipliers 1137 * 1138 * @device: hid device 1139 * 1140 * Search for all Resolution Multiplier Feature Reports and apply their 1141 * value to all matching Input items. This only updates the internal struct 1142 * fields. 1143 * 1144 * The Resolution Multiplier is applied by the hardware. If the multiplier 1145 * is anything other than 1, the hardware will send pre-multiplied events 1146 * so that the same physical interaction generates an accumulated 1147 * accumulated_value = value * * multiplier 1148 * This may be achieved by sending 1149 * - "value * multiplier" for each event, or 1150 * - "value" but "multiplier" times as frequently, or 1151 * - a combination of the above 1152 * The only guarantee is that the same physical interaction always generates 1153 * an accumulated 'value * multiplier'. 1154 * 1155 * This function must be called before any event processing and after 1156 * any SetRequest to the Resolution Multiplier. 1157 */ 1158 void hid_setup_resolution_multiplier(struct hid_device *hid) 1159 { 1160 struct hid_report_enum *rep_enum; 1161 struct hid_report *rep; 1162 struct hid_usage *usage; 1163 int i, j; 1164 1165 rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; 1166 list_for_each_entry(rep, &rep_enum->report_list, list) { 1167 for (i = 0; i < rep->maxfield; i++) { 1168 /* Ignore if report count is out of bounds. */ 1169 if (rep->field[i]->report_count < 1) 1170 continue; 1171 1172 for (j = 0; j < rep->field[i]->maxusage; j++) { 1173 usage = &rep->field[i]->usage[j]; 1174 if (usage->hid == HID_GD_RESOLUTION_MULTIPLIER) 1175 hid_apply_multiplier(hid, 1176 rep->field[i]); 1177 } 1178 } 1179 } 1180 } 1181 EXPORT_SYMBOL_GPL(hid_setup_resolution_multiplier); 1182 1183 /** 1184 * hid_open_report - open a driver-specific device report 1185 * 1186 * @device: hid device 1187 * 1188 * Parse a report description into a hid_device structure. Reports are 1189 * enumerated, fields are attached to these reports. 1190 * 0 returned on success, otherwise nonzero error value. 1191 * 1192 * This function (or the equivalent hid_parse() macro) should only be 1193 * called from probe() in drivers, before starting the device. 
1194 */ 1195 int hid_open_report(struct hid_device *device) 1196 { 1197 struct hid_parser *parser; 1198 struct hid_item item; 1199 unsigned int size; 1200 __u8 *start; 1201 __u8 *buf; 1202 __u8 *end; 1203 __u8 *next; 1204 int ret; 1205 int i; 1206 static int (*dispatch_type[])(struct hid_parser *parser, 1207 struct hid_item *item) = { 1208 hid_parser_main, 1209 hid_parser_global, 1210 hid_parser_local, 1211 hid_parser_reserved 1212 }; 1213 1214 if (WARN_ON(device->status & HID_STAT_PARSED)) 1215 return -EBUSY; 1216 1217 start = device->dev_rdesc; 1218 if (WARN_ON(!start)) 1219 return -ENODEV; 1220 size = device->dev_rsize; 1221 1222 buf = kmemdup(start, size, GFP_KERNEL); 1223 if (buf == NULL) 1224 return -ENOMEM; 1225 1226 if (device->driver->report_fixup) 1227 start = device->driver->report_fixup(device, buf, &size); 1228 else 1229 start = buf; 1230 1231 start = kmemdup(start, size, GFP_KERNEL); 1232 kfree(buf); 1233 if (start == NULL) 1234 return -ENOMEM; 1235 1236 device->rdesc = start; 1237 device->rsize = size; 1238 1239 parser = vzalloc(sizeof(struct hid_parser)); 1240 if (!parser) { 1241 ret = -ENOMEM; 1242 goto alloc_err; 1243 } 1244 1245 parser->device = device; 1246 1247 end = start + size; 1248 1249 device->collection = kcalloc(HID_DEFAULT_NUM_COLLECTIONS, 1250 sizeof(struct hid_collection), GFP_KERNEL); 1251 if (!device->collection) { 1252 ret = -ENOMEM; 1253 goto err; 1254 } 1255 device->collection_size = HID_DEFAULT_NUM_COLLECTIONS; 1256 for (i = 0; i < HID_DEFAULT_NUM_COLLECTIONS; i++) 1257 device->collection[i].parent_idx = -1; 1258 1259 ret = -EINVAL; 1260 while ((next = fetch_item(start, end, &item)) != NULL) { 1261 start = next; 1262 1263 if (item.format != HID_ITEM_FORMAT_SHORT) { 1264 hid_err(device, "unexpected long global item\n"); 1265 goto err; 1266 } 1267 1268 if (dispatch_type[item.type](parser, &item)) { 1269 hid_err(device, "item %u %u %u %u parsing failed\n", 1270 item.format, (unsigned)item.size, 1271 (unsigned)item.type, (unsigned)item.tag); 1272 goto err; 1273 } 1274 1275 if (start == end) { 1276 if (parser->collection_stack_ptr) { 1277 hid_err(device, "unbalanced collection at end of report description\n"); 1278 goto err; 1279 } 1280 if (parser->local.delimiter_depth) { 1281 hid_err(device, "unbalanced delimiter at end of report description\n"); 1282 goto err; 1283 } 1284 1285 /* 1286 * fetch initial values in case the device's 1287 * default multiplier isn't the recommended 1 1288 */ 1289 hid_setup_resolution_multiplier(device); 1290 1291 kfree(parser->collection_stack); 1292 vfree(parser); 1293 device->status |= HID_STAT_PARSED; 1294 1295 return 0; 1296 } 1297 } 1298 1299 hid_err(device, "item fetching failed at offset %u/%u\n", 1300 size - (unsigned int)(end - start), size); 1301 err: 1302 kfree(parser->collection_stack); 1303 alloc_err: 1304 vfree(parser); 1305 hid_close_report(device); 1306 return ret; 1307 } 1308 EXPORT_SYMBOL_GPL(hid_open_report); 1309 1310 /* 1311 * Convert a signed n-bit integer to signed 32-bit integer. Common 1312 * cases are done through the compiler, the screwed things has to be 1313 * done by hand. 1314 */ 1315 1316 static s32 snto32(__u32 value, unsigned n) 1317 { 1318 if (!value || !n) 1319 return 0; 1320 1321 if (n > 32) 1322 n = 32; 1323 1324 switch (n) { 1325 case 8: return ((__s8)value); 1326 case 16: return ((__s16)value); 1327 case 32: return ((__s32)value); 1328 } 1329 return value & (1 << (n - 1)) ? 
value | (~0U << n) : value; 1330 } 1331 1332 s32 hid_snto32(__u32 value, unsigned n) 1333 { 1334 return snto32(value, n); 1335 } 1336 EXPORT_SYMBOL_GPL(hid_snto32); 1337 1338 /* 1339 * Convert a signed 32-bit integer to a signed n-bit integer. 1340 */ 1341 1342 static u32 s32ton(__s32 value, unsigned n) 1343 { 1344 s32 a = value >> (n - 1); 1345 if (a && a != -1) 1346 return value < 0 ? 1 << (n - 1) : (1 << (n - 1)) - 1; 1347 return value & ((1 << n) - 1); 1348 } 1349 1350 /* 1351 * Extract/implement a data field from/to a little endian report (bit array). 1352 * 1353 * Code sort-of follows HID spec: 1354 * http://www.usb.org/developers/hidpage/HID1_11.pdf 1355 * 1356 * While the USB HID spec allows unlimited length bit fields in "report 1357 * descriptors", most devices never use more than 16 bits. 1358 * One model of UPS is claimed to report "LINEV" as a 32-bit field. 1359 * Search linux-kernel and linux-usb-devel archives for "hid-core extract". 1360 */ 1361 1362 static u32 __extract(u8 *report, unsigned offset, int n) 1363 { 1364 unsigned int idx = offset / 8; 1365 unsigned int bit_nr = 0; 1366 unsigned int bit_shift = offset % 8; 1367 int bits_to_copy = 8 - bit_shift; 1368 u32 value = 0; 1369 u32 mask = n < 32 ? (1U << n) - 1 : ~0U; 1370 1371 while (n > 0) { 1372 value |= ((u32)report[idx] >> bit_shift) << bit_nr; 1373 n -= bits_to_copy; 1374 bit_nr += bits_to_copy; 1375 bits_to_copy = 8; 1376 bit_shift = 0; 1377 idx++; 1378 } 1379 1380 return value & mask; 1381 } 1382 1383 u32 hid_field_extract(const struct hid_device *hid, u8 *report, 1384 unsigned offset, unsigned n) 1385 { 1386 if (n > 32) { 1387 hid_warn_once(hid, "%s() called with n (%d) > 32! (%s)\n", 1388 __func__, n, current->comm); 1389 n = 32; 1390 } 1391 1392 return __extract(report, offset, n); 1393 } 1394 EXPORT_SYMBOL_GPL(hid_field_extract); 1395 1396 /* 1397 * "implement" : set bits in a little endian bit stream. 1398 * Same concepts as "extract" (see comments above). 1399 * The data mangled in the bit stream remains in little endian 1400 * order the whole time. It make more sense to talk about 1401 * endianness of register values by considering a register 1402 * a "cached" copy of the little endian bit stream. 1403 */ 1404 1405 static void __implement(u8 *report, unsigned offset, int n, u32 value) 1406 { 1407 unsigned int idx = offset / 8; 1408 unsigned int bit_shift = offset % 8; 1409 int bits_to_set = 8 - bit_shift; 1410 1411 while (n - bits_to_set >= 0) { 1412 report[idx] &= ~(0xff << bit_shift); 1413 report[idx] |= value << bit_shift; 1414 value >>= bits_to_set; 1415 n -= bits_to_set; 1416 bits_to_set = 8; 1417 bit_shift = 0; 1418 idx++; 1419 } 1420 1421 /* last nibble */ 1422 if (n) { 1423 u8 bit_mask = ((1U << n) - 1); 1424 report[idx] &= ~(bit_mask << bit_shift); 1425 report[idx] |= value << bit_shift; 1426 } 1427 } 1428 1429 static void implement(const struct hid_device *hid, u8 *report, 1430 unsigned offset, unsigned n, u32 value) 1431 { 1432 if (unlikely(n > 32)) { 1433 hid_warn(hid, "%s() called with n (%d) > 32! (%s)\n", 1434 __func__, n, current->comm); 1435 n = 32; 1436 } else if (n < 32) { 1437 u32 m = (1U << n) - 1; 1438 1439 if (unlikely(value > m)) { 1440 hid_warn(hid, 1441 "%s() called with too large value %d (n: %d)! (%s)\n", 1442 __func__, value, n, current->comm); 1443 WARN_ON(1); 1444 value &= m; 1445 } 1446 } 1447 1448 __implement(report, offset, n, value); 1449 } 1450 1451 /* 1452 * Search an array for a value. 
1453 */ 1454 1455 static int search(__s32 *array, __s32 value, unsigned n) 1456 { 1457 while (n--) { 1458 if (*array++ == value) 1459 return 0; 1460 } 1461 return -1; 1462 } 1463 1464 /** 1465 * hid_match_report - check if driver's raw_event should be called 1466 * 1467 * @hid: hid device 1468 * @report: hid report to match against 1469 * 1470 * compare hid->driver->report_table->report_type to report->type 1471 */ 1472 static int hid_match_report(struct hid_device *hid, struct hid_report *report) 1473 { 1474 const struct hid_report_id *id = hid->driver->report_table; 1475 1476 if (!id) /* NULL means all */ 1477 return 1; 1478 1479 for (; id->report_type != HID_TERMINATOR; id++) 1480 if (id->report_type == HID_ANY_ID || 1481 id->report_type == report->type) 1482 return 1; 1483 return 0; 1484 } 1485 1486 /** 1487 * hid_match_usage - check if driver's event should be called 1488 * 1489 * @hid: hid device 1490 * @usage: usage to match against 1491 * 1492 * compare hid->driver->usage_table->usage_{type,code} to 1493 * usage->usage_{type,code} 1494 */ 1495 static int hid_match_usage(struct hid_device *hid, struct hid_usage *usage) 1496 { 1497 const struct hid_usage_id *id = hid->driver->usage_table; 1498 1499 if (!id) /* NULL means all */ 1500 return 1; 1501 1502 for (; id->usage_type != HID_ANY_ID - 1; id++) 1503 if ((id->usage_hid == HID_ANY_ID || 1504 id->usage_hid == usage->hid) && 1505 (id->usage_type == HID_ANY_ID || 1506 id->usage_type == usage->type) && 1507 (id->usage_code == HID_ANY_ID || 1508 id->usage_code == usage->code)) 1509 return 1; 1510 return 0; 1511 } 1512 1513 static void hid_process_event(struct hid_device *hid, struct hid_field *field, 1514 struct hid_usage *usage, __s32 value, int interrupt) 1515 { 1516 struct hid_driver *hdrv = hid->driver; 1517 int ret; 1518 1519 if (!list_empty(&hid->debug_list)) 1520 hid_dump_input(hid, usage, value); 1521 1522 if (hdrv && hdrv->event && hid_match_usage(hid, usage)) { 1523 ret = hdrv->event(hid, field, usage, value); 1524 if (ret != 0) { 1525 if (ret < 0) 1526 hid_err(hid, "%s's event failed with %d\n", 1527 hdrv->name, ret); 1528 return; 1529 } 1530 } 1531 1532 if (hid->claimed & HID_CLAIMED_INPUT) 1533 hidinput_hid_event(hid, field, usage, value); 1534 if (hid->claimed & HID_CLAIMED_HIDDEV && interrupt && hid->hiddev_hid_event) 1535 hid->hiddev_hid_event(hid, field, usage, value); 1536 } 1537 1538 /* 1539 * Checks if the given value is valid within this field 1540 */ 1541 static inline int hid_array_value_is_valid(struct hid_field *field, 1542 __s32 value) 1543 { 1544 __s32 min = field->logical_minimum; 1545 1546 /* 1547 * Value needs to be between logical min and max, and 1548 * (value - min) is used as an index in the usage array. 1549 * This array is of size field->maxusage 1550 */ 1551 return value >= min && 1552 value <= field->logical_maximum && 1553 value - min < field->maxusage; 1554 } 1555 1556 /* 1557 * Fetch the field from the data. The field content is stored for next 1558 * report processing (we do differential reporting to the layer). 
1559 */ 1560 static void hid_input_fetch_field(struct hid_device *hid, 1561 struct hid_field *field, 1562 __u8 *data) 1563 { 1564 unsigned n; 1565 unsigned count = field->report_count; 1566 unsigned offset = field->report_offset; 1567 unsigned size = field->report_size; 1568 __s32 min = field->logical_minimum; 1569 __s32 *value; 1570 1571 value = field->new_value; 1572 memset(value, 0, count * sizeof(__s32)); 1573 field->ignored = false; 1574 1575 for (n = 0; n < count; n++) { 1576 1577 value[n] = min < 0 ? 1578 snto32(hid_field_extract(hid, data, offset + n * size, 1579 size), size) : 1580 hid_field_extract(hid, data, offset + n * size, size); 1581 1582 /* Ignore report if ErrorRollOver */ 1583 if (!(field->flags & HID_MAIN_ITEM_VARIABLE) && 1584 hid_array_value_is_valid(field, value[n]) && 1585 field->usage[value[n] - min].hid == HID_UP_KEYBOARD + 1) { 1586 field->ignored = true; 1587 return; 1588 } 1589 } 1590 } 1591 1592 /* 1593 * Process a received variable field. 1594 */ 1595 1596 static void hid_input_var_field(struct hid_device *hid, 1597 struct hid_field *field, 1598 int interrupt) 1599 { 1600 unsigned int count = field->report_count; 1601 __s32 *value = field->new_value; 1602 unsigned int n; 1603 1604 for (n = 0; n < count; n++) 1605 hid_process_event(hid, 1606 field, 1607 &field->usage[n], 1608 value[n], 1609 interrupt); 1610 1611 memcpy(field->value, value, count * sizeof(__s32)); 1612 } 1613 1614 /* 1615 * Process a received array field. The field content is stored for 1616 * next report processing (we do differential reporting to the layer). 1617 */ 1618 1619 static void hid_input_array_field(struct hid_device *hid, 1620 struct hid_field *field, 1621 int interrupt) 1622 { 1623 unsigned int n; 1624 unsigned int count = field->report_count; 1625 __s32 min = field->logical_minimum; 1626 __s32 *value; 1627 1628 value = field->new_value; 1629 1630 /* ErrorRollOver */ 1631 if (field->ignored) 1632 return; 1633 1634 for (n = 0; n < count; n++) { 1635 if (hid_array_value_is_valid(field, field->value[n]) && 1636 search(value, field->value[n], count)) 1637 hid_process_event(hid, 1638 field, 1639 &field->usage[field->value[n] - min], 1640 0, 1641 interrupt); 1642 1643 if (hid_array_value_is_valid(field, value[n]) && 1644 search(field->value, value[n], count)) 1645 hid_process_event(hid, 1646 field, 1647 &field->usage[value[n] - min], 1648 1, 1649 interrupt); 1650 } 1651 1652 memcpy(field->value, value, count * sizeof(__s32)); 1653 } 1654 1655 /* 1656 * Analyse a received report, and fetch the data from it. The field 1657 * content is stored for next report processing (we do differential 1658 * reporting to the layer). 
1659 */ 1660 static void hid_process_report(struct hid_device *hid, 1661 struct hid_report *report, 1662 __u8 *data, 1663 int interrupt) 1664 { 1665 unsigned int a; 1666 struct hid_field_entry *entry; 1667 struct hid_field *field; 1668 1669 /* first retrieve all incoming values in data */ 1670 for (a = 0; a < report->maxfield; a++) 1671 hid_input_fetch_field(hid, report->field[a], data); 1672 1673 if (!list_empty(&report->field_entry_list)) { 1674 /* INPUT_REPORT, we have a priority list of fields */ 1675 list_for_each_entry(entry, 1676 &report->field_entry_list, 1677 list) { 1678 field = entry->field; 1679 1680 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1681 hid_process_event(hid, 1682 field, 1683 &field->usage[entry->index], 1684 field->new_value[entry->index], 1685 interrupt); 1686 else 1687 hid_input_array_field(hid, field, interrupt); 1688 } 1689 1690 /* we need to do the memcpy at the end for var items */ 1691 for (a = 0; a < report->maxfield; a++) { 1692 field = report->field[a]; 1693 1694 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1695 memcpy(field->value, field->new_value, 1696 field->report_count * sizeof(__s32)); 1697 } 1698 } else { 1699 /* FEATURE_REPORT, regular processing */ 1700 for (a = 0; a < report->maxfield; a++) { 1701 field = report->field[a]; 1702 1703 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1704 hid_input_var_field(hid, field, interrupt); 1705 else 1706 hid_input_array_field(hid, field, interrupt); 1707 } 1708 } 1709 } 1710 1711 /* 1712 * Insert a given usage_index in a field in the list 1713 * of processed usages in the report. 1714 * 1715 * The elements of lower priority score are processed 1716 * first. 1717 */ 1718 static void __hid_insert_field_entry(struct hid_device *hid, 1719 struct hid_report *report, 1720 struct hid_field_entry *entry, 1721 struct hid_field *field, 1722 unsigned int usage_index) 1723 { 1724 struct hid_field_entry *next; 1725 1726 entry->field = field; 1727 entry->index = usage_index; 1728 entry->priority = field->usages_priorities[usage_index]; 1729 1730 /* insert the element at the correct position */ 1731 list_for_each_entry(next, 1732 &report->field_entry_list, 1733 list) { 1734 /* 1735 * the priority of our element is strictly higher 1736 * than the next one, insert it before 1737 */ 1738 if (entry->priority > next->priority) { 1739 list_add_tail(&entry->list, &next->list); 1740 return; 1741 } 1742 } 1743 1744 /* lowest priority score: insert at the end */ 1745 list_add_tail(&entry->list, &report->field_entry_list); 1746 } 1747 1748 static void hid_report_process_ordering(struct hid_device *hid, 1749 struct hid_report *report) 1750 { 1751 struct hid_field *field; 1752 struct hid_field_entry *entries; 1753 unsigned int a, u, usages; 1754 unsigned int count = 0; 1755 1756 /* count the number of individual fields in the report */ 1757 for (a = 0; a < report->maxfield; a++) { 1758 field = report->field[a]; 1759 1760 if (field->flags & HID_MAIN_ITEM_VARIABLE) 1761 count += field->report_count; 1762 else 1763 count++; 1764 } 1765 1766 /* allocate the memory to process the fields */ 1767 entries = kcalloc(count, sizeof(*entries), GFP_KERNEL); 1768 if (!entries) 1769 return; 1770 1771 report->field_entries = entries; 1772 1773 /* 1774 * walk through all fields in the report and 1775 * store them by priority order in report->field_entry_list 1776 * 1777 * - Var elements are individualized (field + usage_index) 1778 * - Arrays are taken as one, we can not chose an order for them 1779 */ 1780 usages = 0; 1781 for (a = 0; a < report->maxfield; 
a++) { 1782 field = report->field[a]; 1783 1784 if (field->flags & HID_MAIN_ITEM_VARIABLE) { 1785 for (u = 0; u < field->report_count; u++) { 1786 __hid_insert_field_entry(hid, report, 1787 &entries[usages], 1788 field, u); 1789 usages++; 1790 } 1791 } else { 1792 __hid_insert_field_entry(hid, report, &entries[usages], 1793 field, 0); 1794 usages++; 1795 } 1796 } 1797 } 1798 1799 static void hid_process_ordering(struct hid_device *hid) 1800 { 1801 struct hid_report *report; 1802 struct hid_report_enum *report_enum = &hid->report_enum[HID_INPUT_REPORT]; 1803 1804 list_for_each_entry(report, &report_enum->report_list, list) 1805 hid_report_process_ordering(hid, report); 1806 } 1807 1808 /* 1809 * Output the field into the report. 1810 */ 1811 1812 static void hid_output_field(const struct hid_device *hid, 1813 struct hid_field *field, __u8 *data) 1814 { 1815 unsigned count = field->report_count; 1816 unsigned offset = field->report_offset; 1817 unsigned size = field->report_size; 1818 unsigned n; 1819 1820 for (n = 0; n < count; n++) { 1821 if (field->logical_minimum < 0) /* signed values */ 1822 implement(hid, data, offset + n * size, size, 1823 s32ton(field->value[n], size)); 1824 else /* unsigned values */ 1825 implement(hid, data, offset + n * size, size, 1826 field->value[n]); 1827 } 1828 } 1829 1830 /* 1831 * Compute the size of a report. 1832 */ 1833 static size_t hid_compute_report_size(struct hid_report *report) 1834 { 1835 if (report->size) 1836 return ((report->size - 1) >> 3) + 1; 1837 1838 return 0; 1839 } 1840 1841 /* 1842 * Create a report. 'data' has to be allocated using 1843 * hid_alloc_report_buf() so that it has proper size. 1844 */ 1845 1846 void hid_output_report(struct hid_report *report, __u8 *data) 1847 { 1848 unsigned n; 1849 1850 if (report->id > 0) 1851 *data++ = report->id; 1852 1853 memset(data, 0, hid_compute_report_size(report)); 1854 for (n = 0; n < report->maxfield; n++) 1855 hid_output_field(report->device, report->field[n], data); 1856 } 1857 EXPORT_SYMBOL_GPL(hid_output_report); 1858 1859 /* 1860 * Allocator for buffer that is going to be passed to hid_output_report() 1861 */ 1862 u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) 1863 { 1864 /* 1865 * 7 extra bytes are necessary to achieve proper functionality 1866 * of implement() working on 8 byte chunks 1867 */ 1868 1869 u32 len = hid_report_len(report) + 7; 1870 1871 return kmalloc(len, flags); 1872 } 1873 EXPORT_SYMBOL_GPL(hid_alloc_report_buf); 1874 1875 /* 1876 * Set a field value. The report this field belongs to has to be 1877 * created and transferred to the device, to set this value in the 1878 * device. 
1879 */ 1880 1881 int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) 1882 { 1883 unsigned size; 1884 1885 if (!field) 1886 return -1; 1887 1888 size = field->report_size; 1889 1890 hid_dump_input(field->report->device, field->usage + offset, value); 1891 1892 if (offset >= field->report_count) { 1893 hid_err(field->report->device, "offset (%d) exceeds report_count (%d)\n", 1894 offset, field->report_count); 1895 return -1; 1896 } 1897 if (field->logical_minimum < 0) { 1898 if (value != snto32(s32ton(value, size), size)) { 1899 hid_err(field->report->device, "value %d is out of range\n", value); 1900 return -1; 1901 } 1902 } 1903 field->value[offset] = value; 1904 return 0; 1905 } 1906 EXPORT_SYMBOL_GPL(hid_set_field); 1907 1908 static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, 1909 const u8 *data) 1910 { 1911 struct hid_report *report; 1912 unsigned int n = 0; /* Normally report number is 0 */ 1913 1914 /* Device uses numbered reports, data[0] is report number */ 1915 if (report_enum->numbered) 1916 n = *data; 1917 1918 report = report_enum->report_id_hash[n]; 1919 if (report == NULL) 1920 dbg_hid("undefined report_id %u received\n", n); 1921 1922 return report; 1923 } 1924 1925 /* 1926 * Implement a generic .request() callback, using .raw_request() 1927 * DO NOT USE in hid drivers directly, but through hid_hw_request instead. 1928 */ 1929 int __hid_request(struct hid_device *hid, struct hid_report *report, 1930 enum hid_class_request reqtype) 1931 { 1932 char *buf; 1933 int ret; 1934 u32 len; 1935 1936 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1937 if (!buf) 1938 return -ENOMEM; 1939 1940 len = hid_report_len(report); 1941 1942 if (reqtype == HID_REQ_SET_REPORT) 1943 hid_output_report(report, buf); 1944 1945 ret = hid->ll_driver->raw_request(hid, report->id, buf, len, 1946 report->type, reqtype); 1947 if (ret < 0) { 1948 dbg_hid("unable to complete request: %d\n", ret); 1949 goto out; 1950 } 1951 1952 if (reqtype == HID_REQ_GET_REPORT) 1953 hid_input_report(hid, report->type, buf, ret, 0); 1954 1955 ret = 0; 1956 1957 out: 1958 kfree(buf); 1959 return ret; 1960 } 1961 EXPORT_SYMBOL_GPL(__hid_request); 1962 1963 int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 1964 int interrupt) 1965 { 1966 struct hid_report_enum *report_enum = hid->report_enum + type; 1967 struct hid_report *report; 1968 struct hid_driver *hdrv; 1969 u32 rsize, csize = size; 1970 u8 *cdata = data; 1971 int ret = 0; 1972 1973 report = hid_get_report(report_enum, data); 1974 if (!report) 1975 goto out; 1976 1977 if (report_enum->numbered) { 1978 cdata++; 1979 csize--; 1980 } 1981 1982 rsize = hid_compute_report_size(report); 1983 1984 if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE) 1985 rsize = HID_MAX_BUFFER_SIZE - 1; 1986 else if (rsize > HID_MAX_BUFFER_SIZE) 1987 rsize = HID_MAX_BUFFER_SIZE; 1988 1989 if (csize < rsize) { 1990 dbg_hid("report %d is too short, (%d < %d)\n", report->id, 1991 csize, rsize); 1992 memset(cdata + csize, 0, rsize - csize); 1993 } 1994 1995 if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event) 1996 hid->hiddev_report_event(hid, report); 1997 if (hid->claimed & HID_CLAIMED_HIDRAW) { 1998 ret = hidraw_report_event(hid, data, size); 1999 if (ret) 2000 goto out; 2001 } 2002 2003 if (hid->claimed != HID_CLAIMED_HIDRAW && report->maxfield) { 2004 hid_process_report(hid, report, cdata, interrupt); 2005 hdrv = hid->driver; 2006 if (hdrv && hdrv->report) 2007 hdrv->report(hid, 
report); 2008 } 2009 2010 if (hid->claimed & HID_CLAIMED_INPUT) 2011 hidinput_report_event(hid, report); 2012 out: 2013 return ret; 2014 } 2015 EXPORT_SYMBOL_GPL(hid_report_raw_event); 2016 2017 /** 2018 * hid_input_report - report data from lower layer (usb, bt...) 2019 * 2020 * @hid: hid device 2021 * @type: HID report type (HID_*_REPORT) 2022 * @data: report contents 2023 * @size: size of data parameter 2024 * @interrupt: distinguish between interrupt and control transfers 2025 * 2026 * This is the entry point for report data coming from the lower layers. 2027 */ 2028 int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, 2029 int interrupt) 2030 { 2031 struct hid_report_enum *report_enum; 2032 struct hid_driver *hdrv; 2033 struct hid_report *report; 2034 int ret = 0; 2035 2036 if (!hid) 2037 return -ENODEV; 2038 2039 if (down_trylock(&hid->driver_input_lock)) 2040 return -EBUSY; 2041 2042 if (!hid->driver) { 2043 ret = -ENODEV; 2044 goto unlock; 2045 } 2046 report_enum = hid->report_enum + type; 2047 hdrv = hid->driver; 2048 2049 if (!size) { 2050 dbg_hid("empty report\n"); 2051 ret = -1; 2052 goto unlock; 2053 } 2054 2055 /* Avoid unnecessary overhead if debugfs is disabled */ 2056 if (!list_empty(&hid->debug_list)) 2057 hid_dump_report(hid, type, data, size); 2058 2059 report = hid_get_report(report_enum, data); 2060 2061 if (!report) { 2062 ret = -1; 2063 goto unlock; 2064 } 2065 2066 if (hdrv && hdrv->raw_event && hid_match_report(hid, report)) { 2067 ret = hdrv->raw_event(hid, report, data, size); 2068 if (ret < 0) 2069 goto unlock; 2070 } 2071 2072 ret = hid_report_raw_event(hid, type, data, size, interrupt); 2073 2074 unlock: 2075 up(&hid->driver_input_lock); 2076 return ret; 2077 } 2078 EXPORT_SYMBOL_GPL(hid_input_report); 2079 2080 bool hid_match_one_id(const struct hid_device *hdev, 2081 const struct hid_device_id *id) 2082 { 2083 return (id->bus == HID_BUS_ANY || id->bus == hdev->bus) && 2084 (id->group == HID_GROUP_ANY || id->group == hdev->group) && 2085 (id->vendor == HID_ANY_ID || id->vendor == hdev->vendor) && 2086 (id->product == HID_ANY_ID || id->product == hdev->product); 2087 } 2088 2089 const struct hid_device_id *hid_match_id(const struct hid_device *hdev, 2090 const struct hid_device_id *id) 2091 { 2092 for (; id->bus; id++) 2093 if (hid_match_one_id(hdev, id)) 2094 return id; 2095 2096 return NULL; 2097 } 2098 EXPORT_SYMBOL_GPL(hid_match_id); 2099 2100 static const struct hid_device_id hid_hiddev_list[] = { 2101 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS) }, 2102 { HID_USB_DEVICE(USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS1) }, 2103 { } 2104 }; 2105 2106 static bool hid_hiddev(struct hid_device *hdev) 2107 { 2108 return !!hid_match_id(hdev, hid_hiddev_list); 2109 } 2110 2111 2112 static ssize_t 2113 read_report_descriptor(struct file *filp, struct kobject *kobj, 2114 struct bin_attribute *attr, 2115 char *buf, loff_t off, size_t count) 2116 { 2117 struct device *dev = kobj_to_dev(kobj); 2118 struct hid_device *hdev = to_hid_device(dev); 2119 2120 if (off >= hdev->rsize) 2121 return 0; 2122 2123 if (off + count > hdev->rsize) 2124 count = hdev->rsize - off; 2125 2126 memcpy(buf, hdev->rdesc + off, count); 2127 2128 return count; 2129 } 2130 2131 static ssize_t 2132 show_country(struct device *dev, struct device_attribute *attr, 2133 char *buf) 2134 { 2135 struct hid_device *hdev = to_hid_device(dev); 2136 2137 return sprintf(buf, "%02x\n", hdev->country & 0xff); 2138 } 2139 2140 static struct bin_attribute dev_bin_attr_report_desc = { 2141 .attr = 
{ .name = "report_descriptor", .mode = 0444 }, 2142 .read = read_report_descriptor, 2143 .size = HID_MAX_DESCRIPTOR_SIZE, 2144 }; 2145 2146 static const struct device_attribute dev_attr_country = { 2147 .attr = { .name = "country", .mode = 0444 }, 2148 .show = show_country, 2149 }; 2150 2151 int hid_connect(struct hid_device *hdev, unsigned int connect_mask) 2152 { 2153 static const char *types[] = { "Device", "Pointer", "Mouse", "Device", 2154 "Joystick", "Gamepad", "Keyboard", "Keypad", 2155 "Multi-Axis Controller" 2156 }; 2157 const char *type, *bus; 2158 char buf[64] = ""; 2159 unsigned int i; 2160 int len; 2161 int ret; 2162 2163 if (hdev->quirks & HID_QUIRK_HIDDEV_FORCE) 2164 connect_mask |= (HID_CONNECT_HIDDEV_FORCE | HID_CONNECT_HIDDEV); 2165 if (hdev->quirks & HID_QUIRK_HIDINPUT_FORCE) 2166 connect_mask |= HID_CONNECT_HIDINPUT_FORCE; 2167 if (hdev->bus != BUS_USB) 2168 connect_mask &= ~HID_CONNECT_HIDDEV; 2169 if (hid_hiddev(hdev)) 2170 connect_mask |= HID_CONNECT_HIDDEV_FORCE; 2171 2172 if ((connect_mask & HID_CONNECT_HIDINPUT) && !hidinput_connect(hdev, 2173 connect_mask & HID_CONNECT_HIDINPUT_FORCE)) 2174 hdev->claimed |= HID_CLAIMED_INPUT; 2175 2176 if ((connect_mask & HID_CONNECT_HIDDEV) && hdev->hiddev_connect && 2177 !hdev->hiddev_connect(hdev, 2178 connect_mask & HID_CONNECT_HIDDEV_FORCE)) 2179 hdev->claimed |= HID_CLAIMED_HIDDEV; 2180 if ((connect_mask & HID_CONNECT_HIDRAW) && !hidraw_connect(hdev)) 2181 hdev->claimed |= HID_CLAIMED_HIDRAW; 2182 2183 if (connect_mask & HID_CONNECT_DRIVER) 2184 hdev->claimed |= HID_CLAIMED_DRIVER; 2185 2186 /* Drivers with the ->raw_event callback set are not required to connect 2187 * to any other listener. */ 2188 if (!hdev->claimed && !hdev->driver->raw_event) { 2189 hid_err(hdev, "device has no listeners, quitting\n"); 2190 return -ENODEV; 2191 } 2192 2193 hid_process_ordering(hdev); 2194 2195 if ((hdev->claimed & HID_CLAIMED_INPUT) && 2196 (connect_mask & HID_CONNECT_FF) && hdev->ff_init) 2197 hdev->ff_init(hdev); 2198 2199 len = 0; 2200 if (hdev->claimed & HID_CLAIMED_INPUT) 2201 len += sprintf(buf + len, "input"); 2202 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2203 len += sprintf(buf + len, "%shiddev%d", len ? "," : "", 2204 ((struct hiddev *)hdev->hiddev)->minor); 2205 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2206 len += sprintf(buf + len, "%shidraw%d", len ?
"," : "", 2207 ((struct hidraw *)hdev->hidraw)->minor); 2208 2209 type = "Device"; 2210 for (i = 0; i < hdev->maxcollection; i++) { 2211 struct hid_collection *col = &hdev->collection[i]; 2212 if (col->type == HID_COLLECTION_APPLICATION && 2213 (col->usage & HID_USAGE_PAGE) == HID_UP_GENDESK && 2214 (col->usage & 0xffff) < ARRAY_SIZE(types)) { 2215 type = types[col->usage & 0xffff]; 2216 break; 2217 } 2218 } 2219 2220 switch (hdev->bus) { 2221 case BUS_USB: 2222 bus = "USB"; 2223 break; 2224 case BUS_BLUETOOTH: 2225 bus = "BLUETOOTH"; 2226 break; 2227 case BUS_I2C: 2228 bus = "I2C"; 2229 break; 2230 case BUS_VIRTUAL: 2231 bus = "VIRTUAL"; 2232 break; 2233 case BUS_INTEL_ISHTP: 2234 case BUS_AMD_SFH: 2235 bus = "SENSOR HUB"; 2236 break; 2237 default: 2238 bus = "<UNKNOWN>"; 2239 } 2240 2241 ret = device_create_file(&hdev->dev, &dev_attr_country); 2242 if (ret) 2243 hid_warn(hdev, 2244 "can't create sysfs country code attribute err: %d\n", ret); 2245 2246 hid_info(hdev, "%s: %s HID v%x.%02x %s [%s] on %s\n", 2247 buf, bus, hdev->version >> 8, hdev->version & 0xff, 2248 type, hdev->name, hdev->phys); 2249 2250 return 0; 2251 } 2252 EXPORT_SYMBOL_GPL(hid_connect); 2253 2254 void hid_disconnect(struct hid_device *hdev) 2255 { 2256 device_remove_file(&hdev->dev, &dev_attr_country); 2257 if (hdev->claimed & HID_CLAIMED_INPUT) 2258 hidinput_disconnect(hdev); 2259 if (hdev->claimed & HID_CLAIMED_HIDDEV) 2260 hdev->hiddev_disconnect(hdev); 2261 if (hdev->claimed & HID_CLAIMED_HIDRAW) 2262 hidraw_disconnect(hdev); 2263 hdev->claimed = 0; 2264 } 2265 EXPORT_SYMBOL_GPL(hid_disconnect); 2266 2267 /** 2268 * hid_hw_start - start underlying HW 2269 * @hdev: hid device 2270 * @connect_mask: which outputs to connect, see HID_CONNECT_* 2271 * 2272 * Call this in probe function *after* hid_parse. This will set up HW 2273 * buffers and start the device (if not deferred to device open). 2274 * hid_hw_stop must be called if this was successful. 2275 */ 2276 int hid_hw_start(struct hid_device *hdev, unsigned int connect_mask) 2277 { 2278 int error; 2279 2280 error = hdev->ll_driver->start(hdev); 2281 if (error) 2282 return error; 2283 2284 if (connect_mask) { 2285 error = hid_connect(hdev, connect_mask); 2286 if (error) { 2287 hdev->ll_driver->stop(hdev); 2288 return error; 2289 } 2290 } 2291 2292 return 0; 2293 } 2294 EXPORT_SYMBOL_GPL(hid_hw_start); 2295 2296 /** 2297 * hid_hw_stop - stop underlying HW 2298 * @hdev: hid device 2299 * 2300 * This is usually called from the remove function, or from probe when 2301 * something failed and hid_hw_start() was already called. 2302 */ 2303 void hid_hw_stop(struct hid_device *hdev) 2304 { 2305 hid_disconnect(hdev); 2306 hdev->ll_driver->stop(hdev); 2307 } 2308 EXPORT_SYMBOL_GPL(hid_hw_stop); 2309 2310 /** 2311 * hid_hw_open - signal underlying HW to start delivering events 2312 * @hdev: hid device 2313 * 2314 * Tell underlying HW to start delivering events from the device. 2315 * This function should be called sometime after a successful call 2316 * to hid_hw_start(). 
2317 */ 2318 int hid_hw_open(struct hid_device *hdev) 2319 { 2320 int ret; 2321 2322 ret = mutex_lock_killable(&hdev->ll_open_lock); 2323 if (ret) 2324 return ret; 2325 2326 if (!hdev->ll_open_count++) { 2327 ret = hdev->ll_driver->open(hdev); 2328 if (ret) 2329 hdev->ll_open_count--; 2330 } 2331 2332 mutex_unlock(&hdev->ll_open_lock); 2333 return ret; 2334 } 2335 EXPORT_SYMBOL_GPL(hid_hw_open); 2336 2337 /** 2338 * hid_hw_close - signal underlying HW to stop delivering events 2339 * 2340 * @hdev: hid device 2341 * 2342 * This function indicates that we are not interested in the events 2343 * from this device anymore. Delivery of events may or may not stop, 2344 * depending on the number of users still outstanding. 2345 */ 2346 void hid_hw_close(struct hid_device *hdev) 2347 { 2348 mutex_lock(&hdev->ll_open_lock); 2349 if (!--hdev->ll_open_count) 2350 hdev->ll_driver->close(hdev); 2351 mutex_unlock(&hdev->ll_open_lock); 2352 } 2353 EXPORT_SYMBOL_GPL(hid_hw_close); 2354 2355 /** 2356 * hid_hw_request - send report request to device 2357 * 2358 * @hdev: hid device 2359 * @report: report to send 2360 * @reqtype: hid request type 2361 */ 2362 void hid_hw_request(struct hid_device *hdev, 2363 struct hid_report *report, enum hid_class_request reqtype) 2364 { 2365 if (hdev->ll_driver->request) 2366 return hdev->ll_driver->request(hdev, report, reqtype); 2367 2368 __hid_request(hdev, report, reqtype); 2369 } 2370 EXPORT_SYMBOL_GPL(hid_hw_request); 2371 2372 /** 2373 * hid_hw_raw_request - send report request to device 2374 * 2375 * @hdev: hid device 2376 * @reportnum: report ID 2377 * @buf: in/out data to transfer 2378 * @len: length of buf 2379 * @rtype: HID report type 2380 * @reqtype: HID_REQ_GET_REPORT or HID_REQ_SET_REPORT 2381 * 2382 * Return: count of data transferred, negative if error 2383 * 2384 * Same behavior as hid_hw_request, but with raw buffers instead. 
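 *
 * A minimal sketch of a GET_REPORT round trip (hypothetical report ID and
 * length; the buffer should be heap-allocated, since some transports DMA
 * from it):
 *
 *	u8 *buf = kzalloc(4, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	buf[0] = 0x01;	// report number in the first byte, by convention
 *	ret = hid_hw_raw_request(hdev, 0x01, buf, 4,
 *				 HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
 *	kfree(buf);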
2385 */ 2386 int hid_hw_raw_request(struct hid_device *hdev, 2387 unsigned char reportnum, __u8 *buf, 2388 size_t len, enum hid_report_type rtype, enum hid_class_request reqtype) 2389 { 2390 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) 2391 return -EINVAL; 2392 2393 return hdev->ll_driver->raw_request(hdev, reportnum, buf, len, 2394 rtype, reqtype); 2395 } 2396 EXPORT_SYMBOL_GPL(hid_hw_raw_request); 2397 2398 /** 2399 * hid_hw_output_report - send output report to device 2400 * 2401 * @hdev: hid device 2402 * @buf: raw data to transfer 2403 * @len: length of buf 2404 * 2405 * Return: count of data transferred, negative if error 2406 */ 2407 int hid_hw_output_report(struct hid_device *hdev, __u8 *buf, size_t len) 2408 { 2409 if (len < 1 || len > HID_MAX_BUFFER_SIZE || !buf) 2410 return -EINVAL; 2411 2412 if (hdev->ll_driver->output_report) 2413 return hdev->ll_driver->output_report(hdev, buf, len); 2414 2415 return -ENOSYS; 2416 } 2417 EXPORT_SYMBOL_GPL(hid_hw_output_report); 2418 2419 #ifdef CONFIG_PM 2420 int hid_driver_suspend(struct hid_device *hdev, pm_message_t state) 2421 { 2422 if (hdev->driver && hdev->driver->suspend) 2423 return hdev->driver->suspend(hdev, state); 2424 2425 return 0; 2426 } 2427 EXPORT_SYMBOL_GPL(hid_driver_suspend); 2428 2429 int hid_driver_reset_resume(struct hid_device *hdev) 2430 { 2431 if (hdev->driver && hdev->driver->reset_resume) 2432 return hdev->driver->reset_resume(hdev); 2433 2434 return 0; 2435 } 2436 EXPORT_SYMBOL_GPL(hid_driver_reset_resume); 2437 2438 int hid_driver_resume(struct hid_device *hdev) 2439 { 2440 if (hdev->driver && hdev->driver->resume) 2441 return hdev->driver->resume(hdev); 2442 2443 return 0; 2444 } 2445 EXPORT_SYMBOL_GPL(hid_driver_resume); 2446 #endif /* CONFIG_PM */ 2447 2448 struct hid_dynid { 2449 struct list_head list; 2450 struct hid_device_id id; 2451 }; 2452 2453 /** 2454 * new_id_store - add a new HID device ID to this driver and re-probe devices 2455 * @drv: target device driver 2456 * @buf: buffer for scanning device ID data 2457 * @count: input size 2458 * 2459 * Adds a new dynamic hid device ID to this driver, 2460 * and causes the driver to probe for all devices again. 2461 */ 2462 static ssize_t new_id_store(struct device_driver *drv, const char *buf, 2463 size_t count) 2464 { 2465 struct hid_driver *hdrv = to_hid_driver(drv); 2466 struct hid_dynid *dynid; 2467 __u32 bus, vendor, product; 2468 unsigned long driver_data = 0; 2469 int ret; 2470 2471 ret = sscanf(buf, "%x %x %x %lx", 2472 &bus, &vendor, &product, &driver_data); 2473 if (ret < 3) 2474 return -EINVAL; 2475 2476 dynid = kzalloc(sizeof(*dynid), GFP_KERNEL); 2477 if (!dynid) 2478 return -ENOMEM; 2479 2480 dynid->id.bus = bus; 2481 dynid->id.group = HID_GROUP_ANY; 2482 dynid->id.vendor = vendor; 2483 dynid->id.product = product; 2484 dynid->id.driver_data = driver_data; 2485 2486 spin_lock(&hdrv->dyn_lock); 2487 list_add_tail(&dynid->list, &hdrv->dyn_list); 2488 spin_unlock(&hdrv->dyn_lock); 2489 2490 ret = driver_attach(&hdrv->driver); 2491 2492 return ret ? 
: count; 2493 } 2494 static DRIVER_ATTR_WO(new_id); 2495 2496 static struct attribute *hid_drv_attrs[] = { 2497 &driver_attr_new_id.attr, 2498 NULL, 2499 }; 2500 ATTRIBUTE_GROUPS(hid_drv); 2501 2502 static void hid_free_dynids(struct hid_driver *hdrv) 2503 { 2504 struct hid_dynid *dynid, *n; 2505 2506 spin_lock(&hdrv->dyn_lock); 2507 list_for_each_entry_safe(dynid, n, &hdrv->dyn_list, list) { 2508 list_del(&dynid->list); 2509 kfree(dynid); 2510 } 2511 spin_unlock(&hdrv->dyn_lock); 2512 } 2513 2514 const struct hid_device_id *hid_match_device(struct hid_device *hdev, 2515 struct hid_driver *hdrv) 2516 { 2517 struct hid_dynid *dynid; 2518 2519 spin_lock(&hdrv->dyn_lock); 2520 list_for_each_entry(dynid, &hdrv->dyn_list, list) { 2521 if (hid_match_one_id(hdev, &dynid->id)) { 2522 spin_unlock(&hdrv->dyn_lock); 2523 return &dynid->id; 2524 } 2525 } 2526 spin_unlock(&hdrv->dyn_lock); 2527 2528 return hid_match_id(hdev, hdrv->id_table); 2529 } 2530 EXPORT_SYMBOL_GPL(hid_match_device); 2531 2532 static int hid_bus_match(struct device *dev, struct device_driver *drv) 2533 { 2534 struct hid_driver *hdrv = to_hid_driver(drv); 2535 struct hid_device *hdev = to_hid_device(dev); 2536 2537 return hid_match_device(hdev, hdrv) != NULL; 2538 } 2539 2540 /** 2541 * hid_compare_device_paths - check if both devices share the same path 2542 * @hdev_a: hid device 2543 * @hdev_b: hid device 2544 * @separator: char to use as separator 2545 * 2546 * Check if two devices share the same path up to the last occurrence of 2547 * the separator char. Both paths must exist (i.e., zero-length paths 2548 * don't match). 2549 */ 2550 bool hid_compare_device_paths(struct hid_device *hdev_a, 2551 struct hid_device *hdev_b, char separator) 2552 { 2553 int n1 = strrchr(hdev_a->phys, separator) - hdev_a->phys; 2554 int n2 = strrchr(hdev_b->phys, separator) - hdev_b->phys; 2555 2556 if (n1 != n2 || n1 <= 0 || n2 <= 0) 2557 return false; 2558 2559 return !strncmp(hdev_a->phys, hdev_b->phys, n1); 2560 } 2561 EXPORT_SYMBOL_GPL(hid_compare_device_paths); 2562 2563 static int hid_device_probe(struct device *dev) 2564 { 2565 struct hid_driver *hdrv = to_hid_driver(dev->driver); 2566 struct hid_device *hdev = to_hid_device(dev); 2567 const struct hid_device_id *id; 2568 int ret = 0; 2569 2570 if (down_interruptible(&hdev->driver_input_lock)) { 2571 ret = -EINTR; 2572 goto end; 2573 } 2574 hdev->io_started = false; 2575 2576 clear_bit(ffs(HID_STAT_REPROBED), &hdev->status); 2577 2578 if (!hdev->driver) { 2579 id = hid_match_device(hdev, hdrv); 2580 if (id == NULL) { 2581 ret = -ENODEV; 2582 goto unlock; 2583 } 2584 2585 if (hdrv->match) { 2586 if (!hdrv->match(hdev, hid_ignore_special_drivers)) { 2587 ret = -ENODEV; 2588 goto unlock; 2589 } 2590 } else { 2591 /* 2592 * hid-generic implements .match(), so if 2593 * hid_ignore_special_drivers is set, we can safely 2594 * return. 
2595 */ 2596 if (hid_ignore_special_drivers) { 2597 ret = -ENODEV; 2598 goto unlock; 2599 } 2600 } 2601 2602 /* reset the quirks that have been previously set */ 2603 hdev->quirks = hid_lookup_quirk(hdev); 2604 hdev->driver = hdrv; 2605 if (hdrv->probe) { 2606 ret = hdrv->probe(hdev, id); 2607 } else { /* default probe */ 2608 ret = hid_open_report(hdev); 2609 if (!ret) 2610 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); 2611 } 2612 if (ret) { 2613 hid_close_report(hdev); 2614 hdev->driver = NULL; 2615 } 2616 } 2617 unlock: 2618 if (!hdev->io_started) 2619 up(&hdev->driver_input_lock); 2620 end: 2621 return ret; 2622 } 2623 2624 static void hid_device_remove(struct device *dev) 2625 { 2626 struct hid_device *hdev = to_hid_device(dev); 2627 struct hid_driver *hdrv; 2628 2629 down(&hdev->driver_input_lock); 2630 hdev->io_started = false; 2631 2632 hdrv = hdev->driver; 2633 if (hdrv) { 2634 if (hdrv->remove) 2635 hdrv->remove(hdev); 2636 else /* default remove */ 2637 hid_hw_stop(hdev); 2638 hid_close_report(hdev); 2639 hdev->driver = NULL; 2640 } 2641 2642 if (!hdev->io_started) 2643 up(&hdev->driver_input_lock); 2644 } 2645 2646 static ssize_t modalias_show(struct device *dev, struct device_attribute *a, 2647 char *buf) 2648 { 2649 struct hid_device *hdev = container_of(dev, struct hid_device, dev); 2650 2651 return scnprintf(buf, PAGE_SIZE, "hid:b%04Xg%04Xv%08Xp%08X\n", 2652 hdev->bus, hdev->group, hdev->vendor, hdev->product); 2653 } 2654 static DEVICE_ATTR_RO(modalias); 2655 2656 static struct attribute *hid_dev_attrs[] = { 2657 &dev_attr_modalias.attr, 2658 NULL, 2659 }; 2660 static struct bin_attribute *hid_dev_bin_attrs[] = { 2661 &dev_bin_attr_report_desc, 2662 NULL 2663 }; 2664 static const struct attribute_group hid_dev_group = { 2665 .attrs = hid_dev_attrs, 2666 .bin_attrs = hid_dev_bin_attrs, 2667 }; 2668 __ATTRIBUTE_GROUPS(hid_dev); 2669 2670 static int hid_uevent(struct device *dev, struct kobj_uevent_env *env) 2671 { 2672 struct hid_device *hdev = to_hid_device(dev); 2673 2674 if (add_uevent_var(env, "HID_ID=%04X:%08X:%08X", 2675 hdev->bus, hdev->vendor, hdev->product)) 2676 return -ENOMEM; 2677 2678 if (add_uevent_var(env, "HID_NAME=%s", hdev->name)) 2679 return -ENOMEM; 2680 2681 if (add_uevent_var(env, "HID_PHYS=%s", hdev->phys)) 2682 return -ENOMEM; 2683 2684 if (add_uevent_var(env, "HID_UNIQ=%s", hdev->uniq)) 2685 return -ENOMEM; 2686 2687 if (add_uevent_var(env, "MODALIAS=hid:b%04Xg%04Xv%08Xp%08X", 2688 hdev->bus, hdev->group, hdev->vendor, hdev->product)) 2689 return -ENOMEM; 2690 2691 return 0; 2692 } 2693 2694 struct bus_type hid_bus_type = { 2695 .name = "hid", 2696 .dev_groups = hid_dev_groups, 2697 .drv_groups = hid_drv_groups, 2698 .match = hid_bus_match, 2699 .probe = hid_device_probe, 2700 .remove = hid_device_remove, 2701 .uevent = hid_uevent, 2702 }; 2703 EXPORT_SYMBOL(hid_bus_type); 2704 2705 int hid_add_device(struct hid_device *hdev) 2706 { 2707 static atomic_t id = ATOMIC_INIT(0); 2708 int ret; 2709 2710 if (WARN_ON(hdev->status & HID_STAT_ADDED)) 2711 return -EBUSY; 2712 2713 hdev->quirks = hid_lookup_quirk(hdev); 2714 2715 /* we need to reject ignored devices here, otherwise they will stay 2716 * allocated waiting for a driver that never comes */ 2717 if (hid_ignore(hdev)) 2718 return -ENODEV; 2719 2720 /* 2721 * Check for the mandatory transport channel. 
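	 *
	 * A rough sketch of what a transport (low-level) driver is expected
	 * to provide before calling hid_add_device(); the foo_* callbacks
	 * are hypothetical:
	 *
	 *	static struct hid_ll_driver foo_ll_driver = {
	 *		.start       = foo_start,
	 *		.stop        = foo_stop,
	 *		.open        = foo_open,
	 *		.close       = foo_close,
	 *		.parse       = foo_parse,
	 *		.raw_request = foo_raw_request,	// mandatory, checked below
	 *	};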
2722 */ 2723 if (!hdev->ll_driver->raw_request) { 2724 hid_err(hdev, "transport driver missing .raw_request()\n"); 2725 return -EINVAL; 2726 } 2727 2728 /* 2729 * Read the device report descriptor once and use it as a template 2730 * for the driver-specific modifications. 2731 */ 2732 ret = hdev->ll_driver->parse(hdev); 2733 if (ret) 2734 return ret; 2735 if (!hdev->dev_rdesc) 2736 return -ENODEV; 2737 2738 /* 2739 * Scan generic devices for group information 2740 */ 2741 if (hid_ignore_special_drivers) { 2742 hdev->group = HID_GROUP_GENERIC; 2743 } else if (!hdev->group && 2744 !(hdev->quirks & HID_QUIRK_HAVE_SPECIAL_DRIVER)) { 2745 ret = hid_scan_report(hdev); 2746 if (ret) 2747 hid_warn(hdev, "bad device descriptor (%d)\n", ret); 2748 } 2749 2750 hdev->id = atomic_inc_return(&id); 2751 2752 /* XXX hack, any other cleaner solution after the driver core 2753 * is converted to allow more than 20 bytes as the device name? */ 2754 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus, 2755 hdev->vendor, hdev->product, hdev->id); 2756 2757 hid_debug_register(hdev, dev_name(&hdev->dev)); 2758 ret = device_add(&hdev->dev); 2759 if (!ret) 2760 hdev->status |= HID_STAT_ADDED; 2761 else 2762 hid_debug_unregister(hdev); 2763 2764 return ret; 2765 } 2766 EXPORT_SYMBOL_GPL(hid_add_device); 2767 2768 /** 2769 * hid_allocate_device - allocate new hid device descriptor 2770 * 2771 * Allocate and initialize a hid device, so that hid_destroy_device() can 2772 * later be used to free it. 2773 * 2774 * A new hid_device pointer is returned on success, otherwise an 2775 * ERR_PTR-encoded error value. 2776 */ 2777 struct hid_device *hid_allocate_device(void) 2778 { 2779 struct hid_device *hdev; 2780 int ret = -ENOMEM; 2781 2782 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL); 2783 if (hdev == NULL) 2784 return ERR_PTR(ret); 2785 2786 device_initialize(&hdev->dev); 2787 hdev->dev.release = hid_device_release; 2788 hdev->dev.bus = &hid_bus_type; 2789 device_enable_async_suspend(&hdev->dev); 2790 2791 hid_close_report(hdev); 2792 2793 init_waitqueue_head(&hdev->debug_wait); 2794 INIT_LIST_HEAD(&hdev->debug_list); 2795 spin_lock_init(&hdev->debug_list_lock); 2796 sema_init(&hdev->driver_input_lock, 1); 2797 mutex_init(&hdev->ll_open_lock); 2798 2799 return hdev; 2800 } 2801 EXPORT_SYMBOL_GPL(hid_allocate_device); 2802 2803 static void hid_remove_device(struct hid_device *hdev) 2804 { 2805 if (hdev->status & HID_STAT_ADDED) { 2806 device_del(&hdev->dev); 2807 hid_debug_unregister(hdev); 2808 hdev->status &= ~HID_STAT_ADDED; 2809 } 2810 kfree(hdev->dev_rdesc); 2811 hdev->dev_rdesc = NULL; 2812 hdev->dev_rsize = 0; 2813 } 2814 2815 /** 2816 * hid_destroy_device - free previously allocated device 2817 * 2818 * @hdev: hid device 2819 * 2820 * If you allocate a hid_device through hid_allocate_device(), you must 2821 * only free it with this function. 
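 *
 * A minimal lifecycle sketch for a transport driver (hypothetical caller,
 * error handling shortened):
 *
 *	hdev = hid_allocate_device();
 *	if (IS_ERR(hdev))
 *		return PTR_ERR(hdev);
 *	// fill in hdev->bus, ->vendor, ->product, ->ll_driver, names, ...
 *	ret = hid_add_device(hdev);
 *	if (ret)
 *		hid_destroy_device(hdev);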
2822 */ 2823 void hid_destroy_device(struct hid_device *hdev) 2824 { 2825 hid_remove_device(hdev); 2826 put_device(&hdev->dev); 2827 } 2828 EXPORT_SYMBOL_GPL(hid_destroy_device); 2829 2830 2831 static int __hid_bus_reprobe_drivers(struct device *dev, void *data) 2832 { 2833 struct hid_driver *hdrv = data; 2834 struct hid_device *hdev = to_hid_device(dev); 2835 2836 if (hdev->driver == hdrv && 2837 !hdrv->match(hdev, hid_ignore_special_drivers) && 2838 !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status)) 2839 return device_reprobe(dev); 2840 2841 return 0; 2842 } 2843 2844 static int __hid_bus_driver_added(struct device_driver *drv, void *data) 2845 { 2846 struct hid_driver *hdrv = to_hid_driver(drv); 2847 2848 if (hdrv->match) { 2849 bus_for_each_dev(&hid_bus_type, NULL, hdrv, 2850 __hid_bus_reprobe_drivers); 2851 } 2852 2853 return 0; 2854 } 2855 2856 static int __bus_removed_driver(struct device_driver *drv, void *data) 2857 { 2858 return bus_rescan_devices(&hid_bus_type); 2859 } 2860 2861 int __hid_register_driver(struct hid_driver *hdrv, struct module *owner, 2862 const char *mod_name) 2863 { 2864 int ret; 2865 2866 hdrv->driver.name = hdrv->name; 2867 hdrv->driver.bus = &hid_bus_type; 2868 hdrv->driver.owner = owner; 2869 hdrv->driver.mod_name = mod_name; 2870 2871 INIT_LIST_HEAD(&hdrv->dyn_list); 2872 spin_lock_init(&hdrv->dyn_lock); 2873 2874 ret = driver_register(&hdrv->driver); 2875 2876 if (ret == 0) 2877 bus_for_each_drv(&hid_bus_type, NULL, NULL, 2878 __hid_bus_driver_added); 2879 2880 return ret; 2881 } 2882 EXPORT_SYMBOL_GPL(__hid_register_driver); 2883 2884 void hid_unregister_driver(struct hid_driver *hdrv) 2885 { 2886 driver_unregister(&hdrv->driver); 2887 hid_free_dynids(hdrv); 2888 2889 bus_for_each_drv(&hid_bus_type, NULL, hdrv, __bus_removed_driver); 2890 } 2891 EXPORT_SYMBOL_GPL(hid_unregister_driver); 2892 2893 int hid_check_keys_pressed(struct hid_device *hid) 2894 { 2895 struct hid_input *hidinput; 2896 int i; 2897 2898 if (!(hid->claimed & HID_CLAIMED_INPUT)) 2899 return 0; 2900 2901 list_for_each_entry(hidinput, &hid->inputs, list) { 2902 for (i = 0; i < BITS_TO_LONGS(KEY_MAX); i++) 2903 if (hidinput->input->key[i]) 2904 return 1; 2905 } 2906 2907 return 0; 2908 } 2909 EXPORT_SYMBOL_GPL(hid_check_keys_pressed); 2910 2911 static int __init hid_init(void) 2912 { 2913 int ret; 2914 2915 if (hid_debug) 2916 pr_warn("hid_debug is now used solely for parser and driver debugging.\n" 2917 "debugfs is now used for inspecting the device (report descriptor, reports)\n"); 2918 2919 ret = bus_register(&hid_bus_type); 2920 if (ret) { 2921 pr_err("can't register hid bus\n"); 2922 goto err; 2923 } 2924 2925 ret = hidraw_init(); 2926 if (ret) 2927 goto err_bus; 2928 2929 hid_debug_init(); 2930 2931 return 0; 2932 err_bus: 2933 bus_unregister(&hid_bus_type); 2934 err: 2935 return ret; 2936 } 2937 2938 static void __exit hid_exit(void) 2939 { 2940 hid_debug_exit(); 2941 hidraw_exit(); 2942 bus_unregister(&hid_bus_type); 2943 hid_quirks_exit(HID_BUS_ANY); 2944 } 2945 2946 module_init(hid_init); 2947 module_exit(hid_exit); 2948 2949 MODULE_AUTHOR("Andreas Gal"); 2950 MODULE_AUTHOR("Vojtech Pavlik"); 2951 MODULE_AUTHOR("Jiri Kosina"); 2952 MODULE_LICENSE("GPL"); 2953
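/*
 * A minimal sketch of how a device driver typically hooks into the bus
 * registration above; the foo_* names and IDs are hypothetical, and
 * module_hid_driver() wraps __hid_register_driver()/hid_unregister_driver():
 *
 *	static const struct hid_device_id foo_devices[] = {
 *		{ HID_USB_DEVICE(USB_VENDOR_ID_FOO, USB_DEVICE_ID_FOO) },
 *		{ }
 *	};
 *	MODULE_DEVICE_TABLE(hid, foo_devices);
 *
 *	static struct hid_driver foo_driver = {
 *		.name     = "foo",
 *		.id_table = foo_devices,
 *	};
 *	module_hid_driver(foo_driver);
 */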