/*
 * The input core
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt

#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"

MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
MODULE_LICENSE("GPL");

#define INPUT_MAX_CHAR_DEVICES		1024
#define INPUT_FIRST_DYNAMIC_DEV		256
static DEFINE_IDA(input_ida);

static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);

/*
 * input_mutex protects access to both input_dev_list and input_handler_list.
 * This also causes input_[un]register_device and input_[un]register_handler
 * to be mutually exclusive which simplifies locking in drivers implementing
 * input handlers.
 */
static DEFINE_MUTEX(input_mutex);

static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };

static inline int is_event_supported(unsigned int code,
				     unsigned long *bm, unsigned int max)
{
	return code <= max && test_bit(code, bm);
}

static int input_defuzz_abs_event(int value, int old_val, int fuzz)
{
	if (fuzz) {
		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
			return old_val;

		if (value > old_val - fuzz && value < old_val + fuzz)
			return (old_val * 3 + value) / 4;

		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
			return (old_val + value) / 2;
	}

	return value;
}

static void input_start_autorepeat(struct input_dev *dev, int code)
{
	if (test_bit(EV_REP, dev->evbit) &&
	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
	    dev->timer.function) {
		dev->repeat_key = code;
		mod_timer(&dev->timer,
			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
	}
}

static void input_stop_autorepeat(struct input_dev *dev)
{
	del_timer(&dev->timer);
}

/*
 * Pass the event first through the handler's filter and then, if it has
 * not been filtered out, to the handler's event processing method(s).
 * This function is called with dev->event_lock held and interrupts
 * disabled.
 */
static unsigned int input_to_handler(struct input_handle *handle,
			struct input_value *vals, unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *end = vals;
	struct input_value *v;

	if (handler->filter) {
		for (v = vals; v != vals + count; v++) {
			if (handler->filter(handle, v->type, v->code, v->value))
				continue;
			if (end != v)
				*end = *v;
			end++;
		}
		count = end - vals;
	}

	if (!count)
		return 0;

	if (handler->events)
		handler->events(handle, vals, count);
	else if (handler->event)
		for (v = vals; v != vals + count; v++)
			handler->event(handle, v->type, v->code, v->value);

	return count;
}
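
/*
 * Illustrative sketch (not part of the original file, identifiers are
 * hypothetical): a handler may supply a ->filter() callback; returning
 * true from it swallows the event before input_to_handler() hands it to
 * the ->events()/->event() methods of any open handle.
 *
 *	static bool example_filter(struct input_handle *handle,
 *				   unsigned int type, unsigned int code,
 *				   int value)
 *	{
 *		return type == EV_KEY && code == KEY_VOLUMEUP;
 *	}
 *
 * Such a callback would be plugged into the .filter member of the
 * handler's struct input_handler before input_register_handler().
 */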

/*
 * Pass values first through all filters and then, if event has not been
 * filtered out, through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 */
static void input_pass_values(struct input_dev *dev,
			      struct input_value *vals, unsigned int count)
{
	struct input_handle *handle;
	struct input_value *v;

	if (!count)
		return;

	rcu_read_lock();

	handle = rcu_dereference(dev->grab);
	if (handle) {
		count = input_to_handler(handle, vals, count);
	} else {
		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
			if (handle->open) {
				count = input_to_handler(handle, vals, count);
				if (!count)
					break;
			}
	}

	rcu_read_unlock();

	/* trigger auto repeat for key events */
	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
		for (v = vals; v != vals + count; v++) {
			if (v->type == EV_KEY && v->value != 2) {
				if (v->value)
					input_start_autorepeat(dev, v->code);
				else
					input_stop_autorepeat(dev);
			}
		}
	}
}

static void input_pass_event(struct input_dev *dev,
			     unsigned int type, unsigned int code, int value)
{
	struct input_value vals[] = { { type, code, value } };

	input_pass_values(dev, vals, ARRAY_SIZE(vals));
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event
 * which may cause keys to get "stuck".
 */
static void input_repeat_key(struct timer_list *t)
{
	struct input_dev *dev = from_timer(dev, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	if (test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {
		struct input_value vals[] = {
			{ EV_KEY, dev->repeat_key, 2 },
			input_value_sync
		};

		input_pass_values(dev, vals, ARRAY_SIZE(vals));

		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
					msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
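
/*
 * Illustrative sketch (not part of the original file): software
 * autorepeat is armed by input_start_autorepeat() only when EV_REP is
 * set, both rep[] values are non-zero and dev->timer.function has been
 * installed (normally by input_enable_softrepeat() from
 * input_register_device()).  A hypothetical driver that handles repeat
 * in hardware can pre-set the delay/period before registering, which
 * suppresses the software fallback:
 *
 *	__set_bit(EV_REP, dev->evbit);
 *	dev->rep[REP_DELAY]  = 400;	// ms until first repeat
 *	dev->rep[REP_PERIOD] = 80;	// ms between repeats
 *	error = input_register_device(dev);
 */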

#define INPUT_IGNORE_EVENT	0
#define INPUT_PASS_TO_HANDLERS	1
#define INPUT_PASS_TO_DEVICE	2
#define INPUT_SLOT		4
#define INPUT_FLUSH		8
#define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)

static int input_handle_abs_event(struct input_dev *dev,
				  unsigned int code, int *pval)
{
	struct input_mt *mt = dev->mt;
	bool is_mt_event;
	int *pold;

	if (code == ABS_MT_SLOT) {
		/*
		 * "Stage" the event; we'll flush it later, when we
		 * get actual touch data.
		 */
		if (mt && *pval >= 0 && *pval < mt->num_slots)
			mt->slot = *pval;

		return INPUT_IGNORE_EVENT;
	}

	is_mt_event = input_is_mt_value(code);

	if (!is_mt_event) {
		pold = &dev->absinfo[code].value;
	} else if (mt) {
		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
	} else {
		/*
		 * Bypass filtering for multi-touch events when
		 * not employing slots.
		 */
		pold = NULL;
	}

	if (pold) {
		*pval = input_defuzz_abs_event(*pval, *pold,
						dev->absinfo[code].fuzz);
		if (*pold == *pval)
			return INPUT_IGNORE_EVENT;

		*pold = *pval;
	}

	/* Flush pending "slot" event */
	if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
		input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
	}

	return INPUT_PASS_TO_HANDLERS;
}

static int input_get_disposition(struct input_dev *dev,
			  unsigned int type, unsigned int code, int *pval)
{
	int disposition = INPUT_IGNORE_EVENT;
	int value = *pval;

	switch (type) {

	case EV_SYN:
		switch (code) {
		case SYN_CONFIG:
			disposition = INPUT_PASS_TO_ALL;
			break;

		case SYN_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
			break;
		case SYN_MT_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS;
			break;
		}
		break;

	case EV_KEY:
		if (is_event_supported(code, dev->keybit, KEY_MAX)) {

			/* auto-repeat bypasses state updates */
			if (value == 2) {
				disposition = INPUT_PASS_TO_HANDLERS;
				break;
			}

			if (!!test_bit(code, dev->key) != !!value) {

				__change_bit(code, dev->key);
				disposition = INPUT_PASS_TO_HANDLERS;
			}
		}
		break;

	case EV_SW:
		if (is_event_supported(code, dev->swbit, SW_MAX) &&
		    !!test_bit(code, dev->sw) != !!value) {

			__change_bit(code, dev->sw);
			disposition = INPUT_PASS_TO_HANDLERS;
		}
		break;

	case EV_ABS:
		if (is_event_supported(code, dev->absbit, ABS_MAX))
			disposition = input_handle_abs_event(dev, code, &value);

		break;

	case EV_REL:
		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
			disposition = INPUT_PASS_TO_HANDLERS;

		break;

	case EV_MSC:
		if (is_event_supported(code, dev->mscbit, MSC_MAX))
			disposition = INPUT_PASS_TO_ALL;

		break;

	case EV_LED:
		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
		    !!test_bit(code, dev->led) != !!value) {

			__change_bit(code, dev->led);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_SND:
		if (is_event_supported(code, dev->sndbit, SND_MAX)) {

			if (!!test_bit(code, dev->snd) != !!value)
				__change_bit(code, dev->snd);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_REP:
		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
			dev->rep[code] = value;
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_FF:
		if (value >= 0)
			disposition = INPUT_PASS_TO_ALL;
		break;

	case EV_PWR:
		disposition = INPUT_PASS_TO_ALL;
		break;
	}

	*pval = value;
	return disposition;
}

static void input_handle_event(struct input_dev *dev,
			       unsigned int type, unsigned int code, int value)
{
	int disposition = input_get_disposition(dev, type, code, &value);

	if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN)
		add_input_randomness(type, code, value);

	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
		dev->event(dev, type, code, value);

	if (!dev->vals)
		return;

	if (disposition & INPUT_PASS_TO_HANDLERS) {
		struct input_value *v;

		if (disposition & INPUT_SLOT) {
			v = &dev->vals[dev->num_vals++];
			v->type = EV_ABS;
			v->code = ABS_MT_SLOT;
			v->value = dev->mt->slot;
		}

		v = &dev->vals[dev->num_vals++];
		v->type = type;
		v->code = code;
		v->value = value;
	}

	if (disposition & INPUT_FLUSH) {
		if (dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	} else if (dev->num_vals >= dev->max_vals - 2) {
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}

/**
 * input_event() - report new input event
 * @dev: device that generated the event
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * This function should be used by drivers implementing various input
 * devices to report input events. See also input_inject_event().
 *
 * NOTE: input_event() may be safely used right after input device was
 * allocated with input_allocate_device(), even before it is registered
 * with input_register_device(), but the event will not reach any of the
 * input handlers. Such early invocation of input_event() may be used
 * to 'seed' initial state of a switch or initial position of absolute
 * axis, etc.
 */
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {

		spin_lock_irqsave(&dev->event_lock, flags);
		input_handle_event(dev, type, code, value);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_event);

/**
 * input_inject_event() - send input event from input handler
 * @handle: input handle to send event through
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * Similar to input_event() but will ignore the event if the device is
 * "grabbed" and the handle injecting the event is not the one that owns
 * the device.
 */
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		spin_lock_irqsave(&dev->event_lock, flags);

		rcu_read_lock();
		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
		rcu_read_unlock();

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_inject_event);
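
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver reports a key press followed by a SYN_REPORT so that handlers
 * flush the packet; as noted above, the same calls may also be issued
 * before input_register_device() to seed initial state.
 *
 *	input_event(dev, EV_KEY, KEY_POWER, 1);
 *	input_event(dev, EV_SYN, SYN_REPORT, 0);
 *	...
 *	input_event(dev, EV_KEY, KEY_POWER, 0);
 *	input_event(dev, EV_SYN, SYN_REPORT, 0);
 */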

/**
 * input_alloc_absinfo - allocates array of input_absinfo structs
 * @dev: the input device emitting absolute events
 *
 * If the absinfo struct the caller asked for is already allocated, this
 * function will not do anything.
 */
void input_alloc_absinfo(struct input_dev *dev)
{
	if (!dev->absinfo)
		dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo),
				       GFP_KERNEL);

	WARN(!dev->absinfo, "%s(): kcalloc() failed?\n", __func__);
}
EXPORT_SYMBOL(input_alloc_absinfo);

void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);
}
EXPORT_SYMBOL(input_set_abs_params);


/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle all events generated by
 * the device are delivered only to this handle. Also events injected
 * by other input handles are ignored while device is grabbed.
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);
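
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * handler can take exclusive ownership of a device while it is open and
 * must drop it again when done; while the grab is active, events from
 * the device and injected events from other handles reach only the
 * grabbing handle.
 *
 *	error = input_grab_device(handle);
 *	if (error)
 *		return error;	// -EBUSY if someone else holds the grab
 *	...
 *	input_release_device(handle);
 */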

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from the given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	if (!dev->users++ && dev->open)
		retval = dev->open(dev);

	if (retval) {
		dev->users--;
		if (!--handle->open) {
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
		}
	}

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from the given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	__input_release_device(handle);

	if (!--dev->users && dev->close)
		dev->close(dev);

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_event()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static void input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_pass_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}

		if (need_sync)
			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);

		memset(dev->key, 0, sizeof(dev->key));
	}
}

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here but they will not
	 * reach any handlers.
	 */
	input_dev_release_keys(dev);

	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}
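
/*
 * Illustrative sketch (not part of the original file; the per-client
 * structure is hypothetical): a handler typically opens the underlying
 * device when user space first opens its interface and closes it again
 * on the last release, keeping the open/users counters above balanced.
 *
 *	static int example_client_open(struct example_client *client)
 *	{
 *		return input_open_device(&client->handle);
 *	}
 *
 *	static void example_client_close(struct example_client *client)
 *	{
 *		input_close_device(&client->handle);
 *	}
 */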

/**
 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
 * @ke: keymap entry containing scancode to be converted.
 * @scancode: pointer to the location where converted scancode should
 *	be stored.
 *
 * This function is used to convert scancode stored in &struct
 * input_keymap_entry into scalar form understood by legacy keymap
 * handling methods. These methods expect scancodes to be represented as
 * 'unsigned int'.
 */
int input_scancode_to_scalar(const struct input_keymap_entry *ke,
			     unsigned int *scancode)
{
	switch (ke->len) {
	case 1:
		*scancode = *((u8 *)ke->scancode);
		break;

	case 2:
		*scancode = *((u16 *)ke->scancode);
		break;

	case 4:
		*scancode = *((u32 *)ke->scancode);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(input_scancode_to_scalar);

/*
 * Those routines handle the default case where no [gs]etkeycode() is
 * defined. In this case, an array indexed by the scancode is used.
 */

static unsigned int input_fetch_keycode(struct input_dev *dev,
					unsigned int index)
{
	switch (dev->keycodesize) {
	case 1:
		return ((u8 *)dev->keycode)[index];

	case 2:
		return ((u16 *)dev->keycode)[index];

	default:
		return ((u32 *)dev->keycode)[index];
	}
}

static int input_default_getkeycode(struct input_dev *dev,
				    struct input_keymap_entry *ke)
{
	unsigned int index;
	int error;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
		index = ke->index;
	else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	ke->keycode = input_fetch_keycode(dev, index);
	ke->index = index;
	ke->len = sizeof(index);
	memcpy(ke->scancode, &index, sizeof(index));

	return 0;
}

static int input_default_setkeycode(struct input_dev *dev,
				    const struct input_keymap_entry *ke,
				    unsigned int *old_keycode)
{
	unsigned int index;
	int error;
	int i;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
		index = ke->index;
	} else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	if (dev->keycodesize < sizeof(ke->keycode) &&
			(ke->keycode >> (dev->keycodesize * 8)))
		return -EINVAL;

	switch (dev->keycodesize) {
	case 1: {
		u8 *k = (u8 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	case 2: {
		u16 *k = (u16 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	default: {
		u32 *k = (u32 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	}

	__clear_bit(*old_keycode, dev->keybit);
	__set_bit(ke->keycode, dev->keybit);

	for (i = 0; i < dev->keycodemax; i++) {
		if (input_fetch_keycode(dev, i) == *old_keycode) {
			__set_bit(*old_keycode, dev->keybit);
			break; /* Setting the bit twice is useless, so break */
		}
	}

	return 0;
}
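
/*
 * Illustrative sketch (not part of the original file; the array name and
 * size are assumptions): a driver relying on the default [gs]etkeycode()
 * implementations above only has to describe its scancode-indexed keymap.
 *
 *	static unsigned short example_keymap[8] = {
 *		KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT,
 *		KEY_ENTER, KEY_ESC, KEY_VOLUMEUP, KEY_VOLUMEDOWN,
 *	};
 *
 *	dev->keycode = example_keymap;
 *	dev->keycodesize = sizeof(example_keymap[0]);
 *	dev->keycodemax = ARRAY_SIZE(example_keymap);
 *	__set_bit(EV_KEY, dev->evbit);
 *	for (i = 0; i < dev->keycodemax; i++)
 *		__set_bit(example_keymap[i], dev->keybit);
 */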

/**
 * input_get_keycode - retrieve keycode currently mapped to a given scancode
 * @dev: input device which keymap is being queried
 * @ke: keymap entry
 *
 * This function should be called by anyone interested in retrieving current
 * keymap. Presently evdev handlers use it.
 */
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->event_lock, flags);
	retval = dev->getkeycode(dev, ke);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_get_keycode);

/**
 * input_set_keycode - attribute a keycode to a given scancode
 * @dev: input device which keymap is being updated
 * @ke: new keymap entry
 *
 * This function should be called by anyone needing to update current
 * keymap. Presently keyboard and evdev handlers use it.
 */
int input_set_keycode(struct input_dev *dev,
		      const struct input_keymap_entry *ke)
{
	unsigned long flags;
	unsigned int old_keycode;
	int retval;

	if (ke->keycode > KEY_MAX)
		return -EINVAL;

	spin_lock_irqsave(&dev->event_lock, flags);

	retval = dev->setkeycode(dev, ke, &old_keycode);
	if (retval)
		goto out;

	/* Make sure KEY_RESERVED did not get enabled. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/*
	 * Simulate keyup event if keycode is not present
	 * in the keymap anymore
	 */
	if (test_bit(EV_KEY, dev->evbit) &&
	    !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
	    __test_and_clear_bit(old_keycode, dev->key)) {
		struct input_value vals[] = {
			{ EV_KEY, old_keycode, 0 },
			input_value_sync
		};

		input_pass_values(dev, vals, ARRAY_SIZE(vals));
	}

 out:
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_set_keycode);

bool input_match_device_id(const struct input_dev *dev,
			   const struct input_device_id *id)
{
	if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
		if (id->bustype != dev->id.bustype)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
		if (id->vendor != dev->id.vendor)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
		if (id->product != dev->id.product)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
		if (id->version != dev->id.version)
			return false;

	if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
	    !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
	    !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
	    !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
	    !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
	    !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
	    !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
	    !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
	    !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
	    !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL(input_match_device_id);

static const struct input_device_id *input_match_device(struct input_handler *handler,
							 struct input_dev *dev)
{
	const struct input_device_id *id;

	for (id = handler->id_table; id->flags || id->driver_info; id++) {
		if (input_match_device_id(dev, id) &&
		    (!handler->match || handler->match(handler, dev))) {
			return id;
		}
	}

	return NULL;
}

static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
{
	const struct input_device_id *id;
	int error;

	id = input_match_device(handler, dev);
	if (!id)
		return -ENODEV;

	error = handler->connect(handler, dev, id);
	if (error && error != -ENODEV)
		pr_err("failed to attach handler %s to device %s, error: %d\n",
		       handler->name, kobject_name(&dev->dev.kobj), error);

	return error;
}
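
/*
 * Illustrative sketch (not part of the original file, identifiers are
 * hypothetical): a handler advertises which devices it wants through an
 * id_table that input_match_device() walks; an empty terminating entry
 * (no flags, no driver_info) ends the table.
 *
 *	static const struct input_device_id example_ids[] = {
 *		{
 *			.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
 *			.evbit = { BIT_MASK(EV_KEY) },
 *		},
 *		{ },	// terminating entry
 *	};
 *	MODULE_DEVICE_TABLE(input, example_ids);
 */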

#ifdef CONFIG_COMPAT

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	int len = 0;

	if (in_compat_syscall()) {
		u32 dword = bits >> 32;
		if (dword || !skip_empty)
			len += snprintf(buf, buf_size, "%x ", dword);

		dword = bits & 0xffffffffUL;
		if (dword || !skip_empty || len)
			len += snprintf(buf + len, max(buf_size - len, 0),
					"%x", dword);
	} else {
		if (bits || !skip_empty)
			len += snprintf(buf, buf_size, "%lx", bits);
	}

	return len;
}

#else /* !CONFIG_COMPAT */

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	return bits || !skip_empty ?
		snprintf(buf, buf_size, "%lx", bits) : 0;
}

#endif

#ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_bus_input_dir;
static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
static int input_devices_state;

static inline void input_wakeup_procfs_readers(void)
{
	input_devices_state++;
	wake_up(&input_devices_poll_wait);
}

static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &input_devices_poll_wait, wait);
	if (file->f_version != input_devices_state) {
		file->f_version = input_devices_state;
		return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

union input_seq_state {
	struct {
		unsigned short pos;
		bool mutex_acquired;
	};
	void *p;
};

static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;

	return seq_list_start(&input_dev_list, *pos);
}

static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &input_dev_list, pos);
}

static void input_seq_stop(struct seq_file *seq, void *v)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	if (state->mutex_acquired)
		mutex_unlock(&input_mutex);
}

static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
				   unsigned long *bitmap, int max)
{
	int i;
	bool skip_empty = true;
	char buf[18];

	seq_printf(seq, "B: %s=", name);

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		if (input_bits_to_string(buf, sizeof(buf),
					 bitmap[i], skip_empty)) {
			skip_empty = false;
			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (skip_empty)
		seq_putc(seq, '0');

	seq_putc(seq, '\n');
}

static int input_devices_seq_show(struct seq_file *seq, void *v)
{
	struct input_dev *dev = container_of(v, struct input_dev, node);
	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	struct input_handle *handle;

	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);

	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
	seq_puts(seq, "H: Handlers=");

	list_for_each_entry(handle, &dev->h_list, d_node)
		seq_printf(seq, "%s ", handle->name);
	seq_putc(seq, '\n');

	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);

	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);

	seq_putc(seq, '\n');

	kfree(path);
	return 0;
}

static const struct seq_operations input_devices_seq_ops = {
	.start	= input_devices_seq_start,
	.next	= input_devices_seq_next,
	.stop	= input_seq_stop,
	.show	= input_devices_seq_show,
};

static int input_proc_devices_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_devices_seq_ops);
}

static const struct file_operations input_devices_fileops = {
	.owner		= THIS_MODULE,
	.open		= input_proc_devices_open,
	.poll		= input_proc_devices_poll,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;
	state->pos = *pos;

	return seq_list_start(&input_handler_list, *pos);
}

static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	state->pos = *pos + 1;
	return seq_list_next(v, &input_handler_list, pos);
}

static int input_handlers_seq_show(struct seq_file *seq, void *v)
{
	struct input_handler *handler = container_of(v, struct input_handler, node);
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
	if (handler->filter)
		seq_puts(seq, " (filter)");
	if (handler->legacy_minors)
		seq_printf(seq, " Minor=%d", handler->minor);
	seq_putc(seq, '\n');

	return 0;
}

static const struct seq_operations input_handlers_seq_ops = {
	.start	= input_handlers_seq_start,
	.next	= input_handlers_seq_next,
	.stop	= input_seq_stop,
	.show	= input_handlers_seq_show,
};

static int input_proc_handlers_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_handlers_seq_ops);
}

static const struct file_operations input_handlers_fileops = {
	.owner		= THIS_MODULE,
	.open		= input_proc_handlers_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init input_proc_init(void)
{
	struct proc_dir_entry *entry;

	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
	if (!proc_bus_input_dir)
		return -ENOMEM;

	entry = proc_create("devices", 0, proc_bus_input_dir,
			    &input_devices_fileops);
	if (!entry)
		goto fail1;

	entry = proc_create("handlers", 0, proc_bus_input_dir,
			    &input_handlers_fileops);
	if (!entry)
		goto fail2;

	return 0;

 fail2:	remove_proc_entry("devices", proc_bus_input_dir);
 fail1: remove_proc_entry("bus/input", NULL);
	return -ENOMEM;
}

static void input_proc_exit(void)
{
	remove_proc_entry("devices", proc_bus_input_dir);
	remove_proc_entry("handlers", proc_bus_input_dir);
	remove_proc_entry("bus/input", NULL);
}

#else /* !CONFIG_PROC_FS */
static inline void input_wakeup_procfs_readers(void) { }
static inline int input_proc_init(void) { return 0; }
static inline void input_proc_exit(void) { }
#endif

#define INPUT_DEV_STRING_ATTR_SHOW(name)				\
static ssize_t input_dev_show_##name(struct device *dev,		\
				     struct device_attribute *attr,	\
				     char *buf)				\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
									\
	return scnprintf(buf, PAGE_SIZE, "%s\n",			\
			 input_dev->name ? input_dev->name : "");	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)

INPUT_DEV_STRING_ATTR_SHOW(name);
INPUT_DEV_STRING_ATTR_SHOW(phys);
INPUT_DEV_STRING_ATTR_SHOW(uniq);

static int input_print_modalias_bits(char *buf, int size,
				     char name, unsigned long *bm,
				     unsigned int min_bit, unsigned int max_bit)
{
	int len = 0, i;

	len += snprintf(buf, max(size, 0), "%c", name);
	for (i = min_bit; i < max_bit; i++)
		if (bm[BIT_WORD(i)] & BIT_MASK(i))
			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
	return len;
}

static int input_print_modalias(char *buf, int size, struct input_dev *id,
				int add_cr)
{
	int len;

	len = snprintf(buf, max(size, 0),
		       "input:b%04Xv%04Xp%04Xe%04X-",
		       id->id.bustype, id->id.vendor,
		       id->id.product, id->id.version);

	len += input_print_modalias_bits(buf + len, size - len,
				'e', id->evbit, 0, EV_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'r', id->relbit, 0, REL_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'a', id->absbit, 0, ABS_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'm', id->mscbit, 0, MSC_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'l', id->ledbit, 0, LED_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				's', id->sndbit, 0, SND_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'f', id->ffbit, 0, FF_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'w', id->swbit, 0, SW_MAX);

	if (add_cr)
		len += snprintf(buf + len, max(size - len, 0), "\n");

	return len;
}

static ssize_t input_dev_show_modalias(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct input_dev *id = to_input_dev(dev);
	ssize_t len;

	len = input_print_modalias(buf, PAGE_SIZE, id, 1);

	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);

static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
			      int max, int add_cr);

static ssize_t input_dev_show_properties(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);
	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
				     INPUT_PROP_MAX, true);
	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);

static struct attribute *input_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_phys.attr,
	&dev_attr_uniq.attr,
	&dev_attr_modalias.attr,
	&dev_attr_properties.attr,
	NULL
};

static const struct attribute_group input_dev_attr_group = {
	.attrs	= input_dev_attrs,
};

#define INPUT_DEV_ID_ATTR(name)						\
static ssize_t input_dev_show_id_##name(struct device *dev,		\
					struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name);	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)

INPUT_DEV_ID_ATTR(bustype);
INPUT_DEV_ID_ATTR(vendor);
INPUT_DEV_ID_ATTR(product);
INPUT_DEV_ID_ATTR(version);

static struct attribute *input_dev_id_attrs[] = {
	&dev_attr_bustype.attr,
	&dev_attr_vendor.attr,
	&dev_attr_product.attr,
	&dev_attr_version.attr,
	NULL
};

static const struct attribute_group input_dev_id_attr_group = {
	.name	= "id",
	.attrs	= input_dev_id_attrs,
};

static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap,
			      int max, int add_cr)
{
	int i;
	int len = 0;
	bool skip_empty = true;

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
					    bitmap[i], skip_empty);
		if (len) {
			skip_empty = false;
			if (i > 0)
				len += snprintf(buf + len, max(buf_size - len, 0), " ");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (len == 0)
		len = snprintf(buf, buf_size, "%d", 0);

	if (add_cr)
		len += snprintf(buf + len, max(buf_size - len, 0), "\n");

	return len;
}

#define INPUT_DEV_CAP_ATTR(ev, bm)					\
static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
				       struct device_attribute *attr,	\
				       char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	int len = input_print_bitmap(buf, PAGE_SIZE,			\
				     input_dev->bm##bit, ev##_MAX,	\
				     true);				\
	return min_t(int, len, PAGE_SIZE);				\
}									\
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)

INPUT_DEV_CAP_ATTR(EV, ev);
INPUT_DEV_CAP_ATTR(KEY, key);
INPUT_DEV_CAP_ATTR(REL, rel);
INPUT_DEV_CAP_ATTR(ABS, abs);
INPUT_DEV_CAP_ATTR(MSC, msc);
INPUT_DEV_CAP_ATTR(LED, led);
INPUT_DEV_CAP_ATTR(SND, snd);
INPUT_DEV_CAP_ATTR(FF, ff);
INPUT_DEV_CAP_ATTR(SW, sw);

static struct attribute *input_dev_caps_attrs[] = {
	&dev_attr_ev.attr,
	&dev_attr_key.attr,
	&dev_attr_rel.attr,
	&dev_attr_abs.attr,
	&dev_attr_msc.attr,
	&dev_attr_led.attr,
	&dev_attr_snd.attr,
	&dev_attr_ff.attr,
	&dev_attr_sw.attr,
	NULL
};

static const struct attribute_group input_dev_caps_attr_group = {
	.name	= "capabilities",
	.attrs	= input_dev_caps_attrs,
};

static const struct attribute_group *input_dev_attr_groups[] = {
	&input_dev_attr_group,
	&input_dev_id_attr_group,
	&input_dev_caps_attr_group,
	NULL
};

static void input_dev_release(struct device *device)
{
	struct input_dev *dev = to_input_dev(device);

	input_ff_destroy(dev);
	input_mt_destroy_slots(dev);
	kfree(dev->absinfo);
	kfree(dev->vals);
	kfree(dev);

	module_put(THIS_MODULE);
}

/*
 * Input uevent interface - loading event handlers based on
 * device bitfields.
 */
static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
				   const char *name, unsigned long *bitmap, int max)
{
	int len;

	if (add_uevent_var(env, "%s", name))
		return -ENOMEM;

	len = input_print_bitmap(&env->buf[env->buflen - 1],
				 sizeof(env->buf) - env->buflen,
				 bitmap, max, false);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
					 struct input_dev *dev)
{
	int len;

	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	len = input_print_modalias(&env->buf[env->buflen - 1],
				   sizeof(env->buf) - env->buflen,
				   dev, 0);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

#define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
	do {								\
		int err = input_add_uevent_bm_var(env, name, bm, max);	\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
	do {								\
		int err = input_add_uevent_modalias_var(env, dev);	\
		if (err)						\
			return err;					\
	} while (0)

static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
{
	struct input_dev *dev = to_input_dev(device);

	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
				dev->id.bustype, dev->id.vendor,
				dev->id.product, dev->id.version);
	if (dev->name)
		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
	if (dev->phys)
		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
	if (dev->uniq)
		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);

	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);

	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);

	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);

	return 0;
}

#define INPUT_DO_TOGGLE(dev, type, bits, on)				\
	do {								\
		int i;							\
		bool active;						\
									\
		if (!test_bit(EV_##type, dev->evbit))			\
			break;						\
									\
		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
			active = test_bit(i, dev->bits);		\
			if (!active && !on)				\
				continue;				\
									\
			dev->event(dev, EV_##type, i, on ? active : 0);	\
		}							\
	} while (0)

static void input_dev_toggle(struct input_dev *dev, bool activate)
{
	if (!dev->event)
		return;

	INPUT_DO_TOGGLE(dev, LED, led, activate);
	INPUT_DO_TOGGLE(dev, SND, snd, activate);

	if (activate && test_bit(EV_REP, dev->evbit)) {
		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
	}
}

/**
 * input_reset_device() - reset/restore the state of input device
 * @dev: input device whose state needs to be reset
 *
 * This function tries to reset the state of an opened input device and
 * bring internal state and state of the hardware in sync with each other.
 * We mark all keys as released, restore LED state, repeat rate, etc.
 */
void input_reset_device(struct input_dev *dev)
{
	unsigned long flags;

	mutex_lock(&dev->mutex);
	spin_lock_irqsave(&dev->event_lock, flags);

	input_dev_toggle(dev, true);
	input_dev_release_keys(dev);

	spin_unlock_irqrestore(&dev->event_lock, flags);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);

#ifdef CONFIG_PM_SLEEP
static int input_dev_suspend(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	input_dev_release_keys(input_dev);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_resume(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Restore state of LEDs and sounds, if any were active. */
	input_dev_toggle(input_dev, true);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_freeze(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	input_dev_release_keys(input_dev);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_poweroff(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static const struct dev_pm_ops input_dev_pm_ops = {
	.suspend	= input_dev_suspend,
	.resume		= input_dev_resume,
	.freeze		= input_dev_freeze,
	.poweroff	= input_dev_poweroff,
	.restore	= input_dev_resume,
};
#endif /* CONFIG_PM_SLEEP */

static const struct device_type input_dev_type = {
	.groups		= input_dev_attr_groups,
	.release	= input_dev_release,
	.uevent		= input_dev_uevent,
#ifdef CONFIG_PM_SLEEP
	.pm		= &input_dev_pm_ops,
#endif
};

static char *input_devnode(struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}

struct class input_class = {
	.name		= "input",
	.devnode	= input_devnode,
};
EXPORT_SYMBOL_GPL(input_class);

/**
 * input_allocate_device - allocate memory for new input device
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * NOTE: Use input_free_device() to free devices that have not been
 * registered; input_unregister_device() should be used for already
 * registered devices.
 */
struct input_dev *input_allocate_device(void)
{
	static atomic_t input_no = ATOMIC_INIT(-1);
	struct input_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev) {
		dev->dev.type = &input_dev_type;
		dev->dev.class = &input_class;
		device_initialize(&dev->dev);
		mutex_init(&dev->mutex);
		spin_lock_init(&dev->event_lock);
		timer_setup(&dev->timer, NULL, 0);
		INIT_LIST_HEAD(&dev->h_list);
		INIT_LIST_HEAD(&dev->node);

		dev_set_name(&dev->dev, "input%lu",
			     (unsigned long)atomic_inc_return(&input_no));

		__module_get(THIS_MODULE);
	}

	return dev;
}
EXPORT_SYMBOL(input_allocate_device);

struct input_devres {
	struct input_dev *input;
};

static int devm_input_device_match(struct device *dev, void *res, void *data)
{
	struct input_devres *devres = res;

	return devres->input == data;
}

static void devm_input_device_release(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: dropping reference to %s\n",
		__func__, dev_name(&input->dev));
	input_put_device(input);
}
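
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver using the non-managed allocator pairs input_allocate_device()
 * with input_free_device() on every error path taken before a
 * successful input_register_device().
 *
 *	struct input_dev *dev;
 *	int error;
 *
 *	dev = input_allocate_device();
 *	if (!dev)
 *		return -ENOMEM;
 *	...
 *	error = input_register_device(dev);
 *	if (error) {
 *		input_free_device(dev);
 *		return error;
 *	}
 */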

/**
 * devm_input_allocate_device - allocate managed input device
 * @dev: device owning the input device being created
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed as it will be done automatically when owner device unbinds from
 * its driver (or binding fails). Once managed input device is allocated,
 * it is ready to be set up and registered in the same fashion as regular
 * input device. There are no special devm_input_device_[un]register()
 * variants, regular ones work with both managed and unmanaged devices,
 * should you need them. In most cases however, managed input devices need
 * not be explicitly unregistered or freed.
 *
 * NOTE: the owner device is set up as parent of input device and users
 * should not override it.
 */
struct input_dev *devm_input_allocate_device(struct device *dev)
{
	struct input_dev *input;
	struct input_devres *devres;

	devres = devres_alloc(devm_input_device_release,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return NULL;

	input = input_allocate_device();
	if (!input) {
		devres_free(devres);
		return NULL;
	}

	input->dev.parent = dev;
	input->devres_managed = true;

	devres->input = input;
	devres_add(dev, devres);

	return input;
}
EXPORT_SYMBOL(devm_input_allocate_device);

/**
 * input_free_device - free memory occupied by input_dev structure
 * @dev: input device to free
 *
 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once device was registered
 * use input_unregister_device() and memory will be freed once last
 * reference to the device is dropped.
 *
 * Device should be allocated by input_allocate_device().
 *
 * NOTE: If there are references to the input device then memory
 * will not be freed until last reference is dropped.
 */
void input_free_device(struct input_dev *dev)
{
	if (dev) {
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->dev.parent,
						devm_input_device_release,
						devm_input_device_match,
						dev));
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_free_device);
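
/*
 * Illustrative sketch (not part of the original file, the probe routine
 * and device name are hypothetical): with the managed allocator a probe()
 * can skip all explicit unregister/free calls; devres tears the device
 * down when the owner unbinds.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "example-button";
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		return input_register_device(input);
 *	}
 */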

/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting up corresponding bit in appropriate capability
 * bitmap the function also adjusts dev->evbit.
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		input_alloc_absinfo(dev);
		if (!dev->absinfo)
			return;

		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		pr_err("input_set_capability: unknown type %u (code %u)\n",
		       type, code);
		dump_stack();
		return;
	}

	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);
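
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * mouse-like driver declares its capabilities before registration; each
 * call sets the per-type bit and the matching EV_* bit in dev->evbit.
 *
 *	input_set_capability(dev, EV_KEY, BTN_LEFT);
 *	input_set_capability(dev, EV_KEY, BTN_RIGHT);
 *	input_set_capability(dev, EV_REL, REL_X);
 *	input_set_capability(dev, EV_REL, REL_Y);
 */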

static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
				sizeof(dev->bits##bit));		\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));

	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers device with input core. The device must be
 * allocated with input_allocate_device() and all its capabilities
 * set up before registering.
 * If the function fails the device must be freed with input_free_device().
 * Once device has been successfully registered it can be unregistered
 * with input_unregister_device(); input_free_device() should not be
 * called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed, their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of managed input device will
 * happen later, when devres stack is unwound to the point where device
 * allocation was made.
/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers device with input core. The device must be
 * allocated with input_allocate_device() and all its capabilities
 * set up before registering.
 * If the function fails, the device must be freed with input_free_device().
 * Once the device has been successfully registered it can be unregistered
 * with input_unregister_device(); input_free_device() should not be
 * called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed, their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * the registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of the managed input device
 * happens later, when the devres stack is unwound to the point where the
 * device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	unsigned int packet_size;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	dev->max_vals = dev->hint_events_per_packet + 2;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		error = -ENOMEM;
		goto err_devres_free;
	}

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	error = device_add(&dev->dev);
	if (error)
		goto err_free_vals;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;

	list_add_tail(&dev->node, &input_dev_list);

	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_free_vals:
	kfree(dev->vals);
	dev->vals = NULL;
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);
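/*
 * Illustrative sketch (not part of the original file): the allocate/
 * register lifecycle described in the kernel-doc above.  The probe
 * function and device name are hypothetical.
 */
static int __maybe_unused example_probe(void)
{
	struct input_dev *input;
	int error;

	input = input_allocate_device();
	if (!input)
		return -ENOMEM;

	input->name = "Example Button";
	input_set_capability(input, EV_KEY, KEY_POWER);

	error = input_register_device(input);
	if (error) {
		/* Registration failed: we still own the device, so free it. */
		input_free_device(input);
		return error;
	}

	/*
	 * Registration succeeded: from now on the device is torn down with
	 * input_unregister_device(), not input_free_device().
	 */
	return 0;
}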
/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once the device is
 * unregistered the caller should not try to access it as it may get
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
					devm_input_device_unregister,
					devm_input_device_match,
					dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be
		 * done when the second devres entry fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;

	INIT_LIST_HEAD(&handler->h_list);

	list_add_tail(&handler->node, &input_handler_list);

	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);

/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from the list of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles, calling @fn for each and
 * passing it @data; stop when @fn returns a non-zero value. The function
 * uses RCU to traverse the list and therefore may be used in atomic
 * contexts. The @fn callback is invoked from an RCU critical section and
 * thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}

	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);
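/*
 * Illustrative sketch (not part of the original file): counting the
 * currently open handles of a handler with
 * input_handler_for_each_handle().  The callback runs inside an RCU
 * read-side critical section and must not sleep; returning 0 keeps the
 * iteration going.  The "example_*" names are hypothetical.
 */
static int example_count_open(struct input_handle *handle, void *data)
{
	unsigned int *count = data;

	if (handle->open)
		(*count)++;

	return 0;	/* keep iterating over all handles */
}

static unsigned int __maybe_unused example_open_handles(struct input_handler *handler)
{
	unsigned int count = 0;

	input_handler_for_each_handle(handler, &count, example_count_open);
	return count;
}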
/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto the device's
 * and handler's lists so that events can flow through
 * it once it is opened using input_open_device().
 *
 * This function is supposed to be called from the handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;

	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;

	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);

	mutex_unlock(&dev->mutex);

	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);

/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes the input handle from the device's
 * and handler's lists.
 *
 * This function is supposed to be called from the handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);

/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of the legacy range
 * @allow_dynamic: whether we can also take an ID from the dynamic range
 *
 * This function allocates a new device minor from the input major
 * namespace. The caller can request a legacy minor by specifying
 * @legacy_base and @legacy_num, and whether an ID may be allocated from
 * the dynamic range if there are no free IDs in the legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_simple_get(&input_ida,
					   legacy_base,
					   legacy_base + legacy_num,
					   GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_simple_get(&input_ida,
			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
			      GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);
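/*
 * Illustrative sketch (not part of the original file): reserving a minor
 * from a (hypothetical) legacy window, falling back to the dynamic range,
 * and pairing it with input_free_minor().  The base/size values and the
 * function name are made up for illustration only.
 */
#define EXAMPLE_MINOR_BASE	64
#define EXAMPLE_MINORS		32

static int __maybe_unused example_reserve_and_release_minor(void)
{
	int minor;

	minor = input_get_new_minor(EXAMPLE_MINOR_BASE, EXAMPLE_MINORS, true);
	if (minor < 0)
		return minor;

	/*
	 * A real handler's connect() would now create its character device
	 * for MKDEV(INPUT_MAJOR, minor).  Here we only demonstrate the
	 * reserve/release pairing.
	 */
	input_free_minor(minor);
	return 0;
}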
/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases a previously allocated input minor so that it
 * can be reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_simple_remove(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d\n", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);
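/*
 * Illustrative sketch (not part of the original file): a minimal input
 * handler that attaches to every key-capable device and logs key events.
 * It uses input_register_handle()/input_open_device() from ->connect()
 * and the reverse sequence from ->disconnect(), matching the API
 * described above.  All "example_*" names are hypothetical; a real
 * handler would normally live in its own module.
 */
static void example_event(struct input_handle *handle,
			  unsigned int type, unsigned int code, int value)
{
	if (type == EV_KEY)
		pr_debug("example: %s: key %u -> %d\n",
			 handle->dev->name, code, value);
}

static int example_connect(struct input_handler *handler,
			   struct input_dev *dev,
			   const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "example";

	error = input_register_handle(handle);
	if (error)
		goto err_free_handle;

	error = input_open_device(handle);
	if (error)
		goto err_unregister_handle;

	return 0;

err_unregister_handle:
	input_unregister_handle(handle);
err_free_handle:
	kfree(handle);
	return error;
}

static void example_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

static const struct input_device_id example_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },	/* terminating entry */
};

static struct input_handler example_handler = {
	.event		= example_event,
	.connect	= example_connect,
	.disconnect	= example_disconnect,
	.name		= "example",
	.id_table	= example_ids,
};

/* Registration would normally happen from the handler module's init/exit: */
static int __maybe_unused example_handler_init(void)
{
	return input_register_handler(&example_handler);
}

static void __maybe_unused example_handler_exit(void)
{
	input_unregister_handler(&example_handler);
}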