1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * The input core 4 * 5 * Copyright (c) 1999-2002 Vojtech Pavlik 6 */ 7 8 9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt 10 11 #include <linux/init.h> 12 #include <linux/types.h> 13 #include <linux/idr.h> 14 #include <linux/input/mt.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/random.h> 18 #include <linux/major.h> 19 #include <linux/proc_fs.h> 20 #include <linux/sched.h> 21 #include <linux/seq_file.h> 22 #include <linux/poll.h> 23 #include <linux/device.h> 24 #include <linux/mutex.h> 25 #include <linux/rcupdate.h> 26 #include "input-compat.h" 27 #include "input-poller.h" 28 29 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); 30 MODULE_DESCRIPTION("Input core"); 31 MODULE_LICENSE("GPL"); 32 33 #define INPUT_MAX_CHAR_DEVICES 1024 34 #define INPUT_FIRST_DYNAMIC_DEV 256 35 static DEFINE_IDA(input_ida); 36 37 static LIST_HEAD(input_dev_list); 38 static LIST_HEAD(input_handler_list); 39 40 /* 41 * input_mutex protects access to both input_dev_list and input_handler_list. 42 * This also causes input_[un]register_device and input_[un]register_handler 43 * be mutually exclusive which simplifies locking in drivers implementing 44 * input handlers. 45 */ 46 static DEFINE_MUTEX(input_mutex); 47 48 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; 49 50 static inline int is_event_supported(unsigned int code, 51 unsigned long *bm, unsigned int max) 52 { 53 return code <= max && test_bit(code, bm); 54 } 55 56 static int input_defuzz_abs_event(int value, int old_val, int fuzz) 57 { 58 if (fuzz) { 59 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2) 60 return old_val; 61 62 if (value > old_val - fuzz && value < old_val + fuzz) 63 return (old_val * 3 + value) / 4; 64 65 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2) 66 return (old_val + value) / 2; 67 } 68 69 return value; 70 } 71 72 static void input_start_autorepeat(struct input_dev *dev, int code) 73 { 74 if (test_bit(EV_REP, dev->evbit) && 75 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && 76 dev->timer.function) { 77 dev->repeat_key = code; 78 mod_timer(&dev->timer, 79 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); 80 } 81 } 82 83 static void input_stop_autorepeat(struct input_dev *dev) 84 { 85 del_timer(&dev->timer); 86 } 87 88 /* 89 * Pass event first through all filters and then, if event has not been 90 * filtered out, through all open handles. This function is called with 91 * dev->event_lock held and interrupts disabled. 92 */ 93 static unsigned int input_to_handler(struct input_handle *handle, 94 struct input_value *vals, unsigned int count) 95 { 96 struct input_handler *handler = handle->handler; 97 struct input_value *end = vals; 98 struct input_value *v; 99 100 if (handler->filter) { 101 for (v = vals; v != vals + count; v++) { 102 if (handler->filter(handle, v->type, v->code, v->value)) 103 continue; 104 if (end != v) 105 *end = *v; 106 end++; 107 } 108 count = end - vals; 109 } 110 111 if (!count) 112 return 0; 113 114 if (handler->events) 115 handler->events(handle, vals, count); 116 else if (handler->event) 117 for (v = vals; v != vals + count; v++) 118 handler->event(handle, v->type, v->code, v->value); 119 120 return count; 121 } 122 123 /* 124 * Pass values first through all filters and then, if event has not been 125 * filtered out, through all open handles. This function is called with 126 * dev->event_lock held and interrupts disabled. 
127 */ 128 static void input_pass_values(struct input_dev *dev, 129 struct input_value *vals, unsigned int count) 130 { 131 struct input_handle *handle; 132 struct input_value *v; 133 134 if (!count) 135 return; 136 137 rcu_read_lock(); 138 139 handle = rcu_dereference(dev->grab); 140 if (handle) { 141 count = input_to_handler(handle, vals, count); 142 } else { 143 list_for_each_entry_rcu(handle, &dev->h_list, d_node) 144 if (handle->open) { 145 count = input_to_handler(handle, vals, count); 146 if (!count) 147 break; 148 } 149 } 150 151 rcu_read_unlock(); 152 153 /* trigger auto repeat for key events */ 154 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) { 155 for (v = vals; v != vals + count; v++) { 156 if (v->type == EV_KEY && v->value != 2) { 157 if (v->value) 158 input_start_autorepeat(dev, v->code); 159 else 160 input_stop_autorepeat(dev); 161 } 162 } 163 } 164 } 165 166 static void input_pass_event(struct input_dev *dev, 167 unsigned int type, unsigned int code, int value) 168 { 169 struct input_value vals[] = { { type, code, value } }; 170 171 input_pass_values(dev, vals, ARRAY_SIZE(vals)); 172 } 173 174 /* 175 * Generate software autorepeat event. Note that we take 176 * dev->event_lock here to avoid racing with input_event 177 * which may cause keys get "stuck". 178 */ 179 static void input_repeat_key(struct timer_list *t) 180 { 181 struct input_dev *dev = from_timer(dev, t, timer); 182 unsigned long flags; 183 184 spin_lock_irqsave(&dev->event_lock, flags); 185 186 if (test_bit(dev->repeat_key, dev->key) && 187 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 188 struct input_value vals[] = { 189 { EV_KEY, dev->repeat_key, 2 }, 190 input_value_sync 191 }; 192 193 input_pass_values(dev, vals, ARRAY_SIZE(vals)); 194 195 if (dev->rep[REP_PERIOD]) 196 mod_timer(&dev->timer, jiffies + 197 msecs_to_jiffies(dev->rep[REP_PERIOD])); 198 } 199 200 spin_unlock_irqrestore(&dev->event_lock, flags); 201 } 202 203 #define INPUT_IGNORE_EVENT 0 204 #define INPUT_PASS_TO_HANDLERS 1 205 #define INPUT_PASS_TO_DEVICE 2 206 #define INPUT_SLOT 4 207 #define INPUT_FLUSH 8 208 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 209 210 static int input_handle_abs_event(struct input_dev *dev, 211 unsigned int code, int *pval) 212 { 213 struct input_mt *mt = dev->mt; 214 bool is_mt_event; 215 int *pold; 216 217 if (code == ABS_MT_SLOT) { 218 /* 219 * "Stage" the event; we'll flush it later, when we 220 * get actual touch data. 221 */ 222 if (mt && *pval >= 0 && *pval < mt->num_slots) 223 mt->slot = *pval; 224 225 return INPUT_IGNORE_EVENT; 226 } 227 228 is_mt_event = input_is_mt_value(code); 229 230 if (!is_mt_event) { 231 pold = &dev->absinfo[code].value; 232 } else if (mt) { 233 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST]; 234 } else { 235 /* 236 * Bypass filtering for multi-touch events when 237 * not employing slots. 
238 */ 239 pold = NULL; 240 } 241 242 if (pold) { 243 *pval = input_defuzz_abs_event(*pval, *pold, 244 dev->absinfo[code].fuzz); 245 if (*pold == *pval) 246 return INPUT_IGNORE_EVENT; 247 248 *pold = *pval; 249 } 250 251 /* Flush pending "slot" event */ 252 if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { 253 input_abs_set_val(dev, ABS_MT_SLOT, mt->slot); 254 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT; 255 } 256 257 return INPUT_PASS_TO_HANDLERS; 258 } 259 260 static int input_get_disposition(struct input_dev *dev, 261 unsigned int type, unsigned int code, int *pval) 262 { 263 int disposition = INPUT_IGNORE_EVENT; 264 int value = *pval; 265 266 switch (type) { 267 268 case EV_SYN: 269 switch (code) { 270 case SYN_CONFIG: 271 disposition = INPUT_PASS_TO_ALL; 272 break; 273 274 case SYN_REPORT: 275 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH; 276 break; 277 case SYN_MT_REPORT: 278 disposition = INPUT_PASS_TO_HANDLERS; 279 break; 280 } 281 break; 282 283 case EV_KEY: 284 if (is_event_supported(code, dev->keybit, KEY_MAX)) { 285 286 /* auto-repeat bypasses state updates */ 287 if (value == 2) { 288 disposition = INPUT_PASS_TO_HANDLERS; 289 break; 290 } 291 292 if (!!test_bit(code, dev->key) != !!value) { 293 294 __change_bit(code, dev->key); 295 disposition = INPUT_PASS_TO_HANDLERS; 296 } 297 } 298 break; 299 300 case EV_SW: 301 if (is_event_supported(code, dev->swbit, SW_MAX) && 302 !!test_bit(code, dev->sw) != !!value) { 303 304 __change_bit(code, dev->sw); 305 disposition = INPUT_PASS_TO_HANDLERS; 306 } 307 break; 308 309 case EV_ABS: 310 if (is_event_supported(code, dev->absbit, ABS_MAX)) 311 disposition = input_handle_abs_event(dev, code, &value); 312 313 break; 314 315 case EV_REL: 316 if (is_event_supported(code, dev->relbit, REL_MAX) && value) 317 disposition = INPUT_PASS_TO_HANDLERS; 318 319 break; 320 321 case EV_MSC: 322 if (is_event_supported(code, dev->mscbit, MSC_MAX)) 323 disposition = INPUT_PASS_TO_ALL; 324 325 break; 326 327 case EV_LED: 328 if (is_event_supported(code, dev->ledbit, LED_MAX) && 329 !!test_bit(code, dev->led) != !!value) { 330 331 __change_bit(code, dev->led); 332 disposition = INPUT_PASS_TO_ALL; 333 } 334 break; 335 336 case EV_SND: 337 if (is_event_supported(code, dev->sndbit, SND_MAX)) { 338 339 if (!!test_bit(code, dev->snd) != !!value) 340 __change_bit(code, dev->snd); 341 disposition = INPUT_PASS_TO_ALL; 342 } 343 break; 344 345 case EV_REP: 346 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) { 347 dev->rep[code] = value; 348 disposition = INPUT_PASS_TO_ALL; 349 } 350 break; 351 352 case EV_FF: 353 if (value >= 0) 354 disposition = INPUT_PASS_TO_ALL; 355 break; 356 357 case EV_PWR: 358 disposition = INPUT_PASS_TO_ALL; 359 break; 360 } 361 362 *pval = value; 363 return disposition; 364 } 365 366 static void input_handle_event(struct input_dev *dev, 367 unsigned int type, unsigned int code, int value) 368 { 369 int disposition = input_get_disposition(dev, type, code, &value); 370 371 if (disposition != INPUT_IGNORE_EVENT && type != EV_SYN) 372 add_input_randomness(type, code, value); 373 374 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 375 dev->event(dev, type, code, value); 376 377 if (!dev->vals) 378 return; 379 380 if (disposition & INPUT_PASS_TO_HANDLERS) { 381 struct input_value *v; 382 383 if (disposition & INPUT_SLOT) { 384 v = &dev->vals[dev->num_vals++]; 385 v->type = EV_ABS; 386 v->code = ABS_MT_SLOT; 387 v->value = dev->mt->slot; 388 } 389 390 v = &dev->vals[dev->num_vals++]; 391 v->type 
= type; 392 v->code = code; 393 v->value = value; 394 } 395 396 if (disposition & INPUT_FLUSH) { 397 if (dev->num_vals >= 2) 398 input_pass_values(dev, dev->vals, dev->num_vals); 399 dev->num_vals = 0; 400 /* 401 * Reset the timestamp on flush so we won't end up 402 * with a stale one. Note we only need to reset the 403 * monolithic one as we use its presence when deciding 404 * whether to generate a synthetic timestamp. 405 */ 406 dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0); 407 } else if (dev->num_vals >= dev->max_vals - 2) { 408 dev->vals[dev->num_vals++] = input_value_sync; 409 input_pass_values(dev, dev->vals, dev->num_vals); 410 dev->num_vals = 0; 411 } 412 413 } 414 415 /** 416 * input_event() - report new input event 417 * @dev: device that generated the event 418 * @type: type of the event 419 * @code: event code 420 * @value: value of the event 421 * 422 * This function should be used by drivers implementing various input 423 * devices to report input events. See also input_inject_event(). 424 * 425 * NOTE: input_event() may be safely used right after input device was 426 * allocated with input_allocate_device(), even before it is registered 427 * with input_register_device(), but the event will not reach any of the 428 * input handlers. Such early invocation of input_event() may be used 429 * to 'seed' initial state of a switch or initial position of absolute 430 * axis, etc. 431 */ 432 void input_event(struct input_dev *dev, 433 unsigned int type, unsigned int code, int value) 434 { 435 unsigned long flags; 436 437 if (is_event_supported(type, dev->evbit, EV_MAX)) { 438 439 spin_lock_irqsave(&dev->event_lock, flags); 440 input_handle_event(dev, type, code, value); 441 spin_unlock_irqrestore(&dev->event_lock, flags); 442 } 443 } 444 EXPORT_SYMBOL(input_event); 445 446 /** 447 * input_inject_event() - send input event from input handler 448 * @handle: input handle to send event through 449 * @type: type of the event 450 * @code: event code 451 * @value: value of the event 452 * 453 * Similar to input_event() but will ignore event if device is 454 * "grabbed" and handle injecting event is not the one that owns 455 * the device. 456 */ 457 void input_inject_event(struct input_handle *handle, 458 unsigned int type, unsigned int code, int value) 459 { 460 struct input_dev *dev = handle->dev; 461 struct input_handle *grab; 462 unsigned long flags; 463 464 if (is_event_supported(type, dev->evbit, EV_MAX)) { 465 spin_lock_irqsave(&dev->event_lock, flags); 466 467 rcu_read_lock(); 468 grab = rcu_dereference(dev->grab); 469 if (!grab || grab == handle) 470 input_handle_event(dev, type, code, value); 471 rcu_read_unlock(); 472 473 spin_unlock_irqrestore(&dev->event_lock, flags); 474 } 475 } 476 EXPORT_SYMBOL(input_inject_event); 477 478 /** 479 * input_alloc_absinfo - allocates array of input_absinfo structs 480 * @dev: the input device emitting absolute events 481 * 482 * If the absinfo struct the caller asked for is already allocated, this 483 * functions will not do anything. 484 */ 485 void input_alloc_absinfo(struct input_dev *dev) 486 { 487 if (dev->absinfo) 488 return; 489 490 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL); 491 if (!dev->absinfo) { 492 dev_err(dev->dev.parent ?: &dev->dev, 493 "%s: unable to allocate memory\n", __func__); 494 /* 495 * We will handle this allocation failure in 496 * input_register_device() when we refuse to register input 497 * device with ABS bits but without absinfo. 
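		 *
		 * Note that drivers normally do not call this function
		 * directly; the allocation happens as a side effect of
		 * input_set_abs_params() or input_set_capability() with
		 * EV_ABS. A minimal sketch (axis ranges are illustrative
		 * values only):
		 *
		 *	input_set_abs_params(dev, ABS_X, 0, 1023, 4, 8);
		 *	input_set_abs_params(dev, ABS_Y, 0, 767, 4, 8);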
		 */
	}
}
EXPORT_SYMBOL(input_alloc_absinfo);

void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);
}
EXPORT_SYMBOL(input_set_abs_params);


/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle all events generated by
 * the device are delivered only to this handle. Also events injected
 * by other input handles are ignored while device is grabbed.
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_event() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from a given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	if (dev->users++) {
		/*
		 * Device is already opened, so we can exit immediately and
		 * report success.
		 */
		goto out;
	}

	if (dev->open) {
		retval = dev->open(dev);
		if (retval) {
			dev->users--;
			handle->open--;
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
			goto out;
		}
	}

	if (dev->poller)
		input_dev_poller_start(dev->poller);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from a given input device.
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	__input_release_device(handle);

	if (!--dev->users) {
		if (dev->poller)
			input_dev_poller_stop(dev->poller);

		if (dev->close)
			dev->close(dev);
	}

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_event()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static void input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_pass_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}

		if (need_sync)
			input_pass_event(dev, EV_SYN, SYN_REPORT, 1);

		memset(dev->key, 0, sizeof(dev->key));
	}
}

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here but they will not
	 * reach any handlers.
	 */
	input_dev_release_keys(dev);

	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}

/**
 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
 * @ke: keymap entry containing scancode to be converted.
 * @scancode: pointer to the location where converted scancode should
 *	be stored.
757 * 758 * This function is used to convert scancode stored in &struct keymap_entry 759 * into scalar form understood by legacy keymap handling methods. These 760 * methods expect scancodes to be represented as 'unsigned int'. 761 */ 762 int input_scancode_to_scalar(const struct input_keymap_entry *ke, 763 unsigned int *scancode) 764 { 765 switch (ke->len) { 766 case 1: 767 *scancode = *((u8 *)ke->scancode); 768 break; 769 770 case 2: 771 *scancode = *((u16 *)ke->scancode); 772 break; 773 774 case 4: 775 *scancode = *((u32 *)ke->scancode); 776 break; 777 778 default: 779 return -EINVAL; 780 } 781 782 return 0; 783 } 784 EXPORT_SYMBOL(input_scancode_to_scalar); 785 786 /* 787 * Those routines handle the default case where no [gs]etkeycode() is 788 * defined. In this case, an array indexed by the scancode is used. 789 */ 790 791 static unsigned int input_fetch_keycode(struct input_dev *dev, 792 unsigned int index) 793 { 794 switch (dev->keycodesize) { 795 case 1: 796 return ((u8 *)dev->keycode)[index]; 797 798 case 2: 799 return ((u16 *)dev->keycode)[index]; 800 801 default: 802 return ((u32 *)dev->keycode)[index]; 803 } 804 } 805 806 static int input_default_getkeycode(struct input_dev *dev, 807 struct input_keymap_entry *ke) 808 { 809 unsigned int index; 810 int error; 811 812 if (!dev->keycodesize) 813 return -EINVAL; 814 815 if (ke->flags & INPUT_KEYMAP_BY_INDEX) 816 index = ke->index; 817 else { 818 error = input_scancode_to_scalar(ke, &index); 819 if (error) 820 return error; 821 } 822 823 if (index >= dev->keycodemax) 824 return -EINVAL; 825 826 ke->keycode = input_fetch_keycode(dev, index); 827 ke->index = index; 828 ke->len = sizeof(index); 829 memcpy(ke->scancode, &index, sizeof(index)); 830 831 return 0; 832 } 833 834 static int input_default_setkeycode(struct input_dev *dev, 835 const struct input_keymap_entry *ke, 836 unsigned int *old_keycode) 837 { 838 unsigned int index; 839 int error; 840 int i; 841 842 if (!dev->keycodesize) 843 return -EINVAL; 844 845 if (ke->flags & INPUT_KEYMAP_BY_INDEX) { 846 index = ke->index; 847 } else { 848 error = input_scancode_to_scalar(ke, &index); 849 if (error) 850 return error; 851 } 852 853 if (index >= dev->keycodemax) 854 return -EINVAL; 855 856 if (dev->keycodesize < sizeof(ke->keycode) && 857 (ke->keycode >> (dev->keycodesize * 8))) 858 return -EINVAL; 859 860 switch (dev->keycodesize) { 861 case 1: { 862 u8 *k = (u8 *)dev->keycode; 863 *old_keycode = k[index]; 864 k[index] = ke->keycode; 865 break; 866 } 867 case 2: { 868 u16 *k = (u16 *)dev->keycode; 869 *old_keycode = k[index]; 870 k[index] = ke->keycode; 871 break; 872 } 873 default: { 874 u32 *k = (u32 *)dev->keycode; 875 *old_keycode = k[index]; 876 k[index] = ke->keycode; 877 break; 878 } 879 } 880 881 if (*old_keycode <= KEY_MAX) { 882 __clear_bit(*old_keycode, dev->keybit); 883 for (i = 0; i < dev->keycodemax; i++) { 884 if (input_fetch_keycode(dev, i) == *old_keycode) { 885 __set_bit(*old_keycode, dev->keybit); 886 /* Setting the bit twice is useless, so break */ 887 break; 888 } 889 } 890 } 891 892 __set_bit(ke->keycode, dev->keybit); 893 return 0; 894 } 895 896 /** 897 * input_get_keycode - retrieve keycode currently mapped to a given scancode 898 * @dev: input device which keymap is being queried 899 * @ke: keymap entry 900 * 901 * This function should be called by anyone interested in retrieving current 902 * keymap. Presently evdev handlers use it. 
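 *
 * A minimal usage sketch (illustrative only; the scancode value is made
 * up and "dev" is assumed to be a valid input device):
 *
 *	struct input_keymap_entry ke = { .len = sizeof(u32) };
 *	u32 scancode = 0x1e;
 *	int error;
 *
 *	memcpy(ke.scancode, &scancode, sizeof(scancode));
 *	error = input_get_keycode(dev, &ke);
 *	if (!error)
 *		pr_debug("scancode %#x -> keycode %u\n", scancode, ke.keycode);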
903 */ 904 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke) 905 { 906 unsigned long flags; 907 int retval; 908 909 spin_lock_irqsave(&dev->event_lock, flags); 910 retval = dev->getkeycode(dev, ke); 911 spin_unlock_irqrestore(&dev->event_lock, flags); 912 913 return retval; 914 } 915 EXPORT_SYMBOL(input_get_keycode); 916 917 /** 918 * input_set_keycode - attribute a keycode to a given scancode 919 * @dev: input device which keymap is being updated 920 * @ke: new keymap entry 921 * 922 * This function should be called by anyone needing to update current 923 * keymap. Presently keyboard and evdev handlers use it. 924 */ 925 int input_set_keycode(struct input_dev *dev, 926 const struct input_keymap_entry *ke) 927 { 928 unsigned long flags; 929 unsigned int old_keycode; 930 int retval; 931 932 if (ke->keycode > KEY_MAX) 933 return -EINVAL; 934 935 spin_lock_irqsave(&dev->event_lock, flags); 936 937 retval = dev->setkeycode(dev, ke, &old_keycode); 938 if (retval) 939 goto out; 940 941 /* Make sure KEY_RESERVED did not get enabled. */ 942 __clear_bit(KEY_RESERVED, dev->keybit); 943 944 /* 945 * Simulate keyup event if keycode is not present 946 * in the keymap anymore 947 */ 948 if (old_keycode > KEY_MAX) { 949 dev_warn(dev->dev.parent ?: &dev->dev, 950 "%s: got too big old keycode %#x\n", 951 __func__, old_keycode); 952 } else if (test_bit(EV_KEY, dev->evbit) && 953 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 954 __test_and_clear_bit(old_keycode, dev->key)) { 955 struct input_value vals[] = { 956 { EV_KEY, old_keycode, 0 }, 957 input_value_sync 958 }; 959 960 input_pass_values(dev, vals, ARRAY_SIZE(vals)); 961 } 962 963 out: 964 spin_unlock_irqrestore(&dev->event_lock, flags); 965 966 return retval; 967 } 968 EXPORT_SYMBOL(input_set_keycode); 969 970 bool input_match_device_id(const struct input_dev *dev, 971 const struct input_device_id *id) 972 { 973 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 974 if (id->bustype != dev->id.bustype) 975 return false; 976 977 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) 978 if (id->vendor != dev->id.vendor) 979 return false; 980 981 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) 982 if (id->product != dev->id.product) 983 return false; 984 985 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) 986 if (id->version != dev->id.version) 987 return false; 988 989 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) || 990 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) || 991 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) || 992 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) || 993 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) || 994 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) || 995 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) || 996 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) || 997 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) || 998 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) { 999 return false; 1000 } 1001 1002 return true; 1003 } 1004 EXPORT_SYMBOL(input_match_device_id); 1005 1006 static const struct input_device_id *input_match_device(struct input_handler *handler, 1007 struct input_dev *dev) 1008 { 1009 const struct input_device_id *id; 1010 1011 for (id = handler->id_table; id->flags || id->driver_info; id++) { 1012 if (input_match_device_id(dev, id) && 1013 (!handler->match || handler->match(handler, dev))) { 1014 return id; 1015 } 1016 } 1017 1018 return NULL; 1019 } 1020 1021 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler) 1022 { 1023 const struct 
input_device_id *id; 1024 int error; 1025 1026 id = input_match_device(handler, dev); 1027 if (!id) 1028 return -ENODEV; 1029 1030 error = handler->connect(handler, dev, id); 1031 if (error && error != -ENODEV) 1032 pr_err("failed to attach handler %s to device %s, error: %d\n", 1033 handler->name, kobject_name(&dev->dev.kobj), error); 1034 1035 return error; 1036 } 1037 1038 #ifdef CONFIG_COMPAT 1039 1040 static int input_bits_to_string(char *buf, int buf_size, 1041 unsigned long bits, bool skip_empty) 1042 { 1043 int len = 0; 1044 1045 if (in_compat_syscall()) { 1046 u32 dword = bits >> 32; 1047 if (dword || !skip_empty) 1048 len += snprintf(buf, buf_size, "%x ", dword); 1049 1050 dword = bits & 0xffffffffUL; 1051 if (dword || !skip_empty || len) 1052 len += snprintf(buf + len, max(buf_size - len, 0), 1053 "%x", dword); 1054 } else { 1055 if (bits || !skip_empty) 1056 len += snprintf(buf, buf_size, "%lx", bits); 1057 } 1058 1059 return len; 1060 } 1061 1062 #else /* !CONFIG_COMPAT */ 1063 1064 static int input_bits_to_string(char *buf, int buf_size, 1065 unsigned long bits, bool skip_empty) 1066 { 1067 return bits || !skip_empty ? 1068 snprintf(buf, buf_size, "%lx", bits) : 0; 1069 } 1070 1071 #endif 1072 1073 #ifdef CONFIG_PROC_FS 1074 1075 static struct proc_dir_entry *proc_bus_input_dir; 1076 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait); 1077 static int input_devices_state; 1078 1079 static inline void input_wakeup_procfs_readers(void) 1080 { 1081 input_devices_state++; 1082 wake_up(&input_devices_poll_wait); 1083 } 1084 1085 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait) 1086 { 1087 poll_wait(file, &input_devices_poll_wait, wait); 1088 if (file->f_version != input_devices_state) { 1089 file->f_version = input_devices_state; 1090 return EPOLLIN | EPOLLRDNORM; 1091 } 1092 1093 return 0; 1094 } 1095 1096 union input_seq_state { 1097 struct { 1098 unsigned short pos; 1099 bool mutex_acquired; 1100 }; 1101 void *p; 1102 }; 1103 1104 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) 1105 { 1106 union input_seq_state *state = (union input_seq_state *)&seq->private; 1107 int error; 1108 1109 /* We need to fit into seq->private pointer */ 1110 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1111 1112 error = mutex_lock_interruptible(&input_mutex); 1113 if (error) { 1114 state->mutex_acquired = false; 1115 return ERR_PTR(error); 1116 } 1117 1118 state->mutex_acquired = true; 1119 1120 return seq_list_start(&input_dev_list, *pos); 1121 } 1122 1123 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1124 { 1125 return seq_list_next(v, &input_dev_list, pos); 1126 } 1127 1128 static void input_seq_stop(struct seq_file *seq, void *v) 1129 { 1130 union input_seq_state *state = (union input_seq_state *)&seq->private; 1131 1132 if (state->mutex_acquired) 1133 mutex_unlock(&input_mutex); 1134 } 1135 1136 static void input_seq_print_bitmap(struct seq_file *seq, const char *name, 1137 unsigned long *bitmap, int max) 1138 { 1139 int i; 1140 bool skip_empty = true; 1141 char buf[18]; 1142 1143 seq_printf(seq, "B: %s=", name); 1144 1145 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1146 if (input_bits_to_string(buf, sizeof(buf), 1147 bitmap[i], skip_empty)) { 1148 skip_empty = false; 1149 seq_printf(seq, "%s%s", buf, i > 0 ? " " : ""); 1150 } 1151 } 1152 1153 /* 1154 * If no output was produced print a single 0. 
1155 */ 1156 if (skip_empty) 1157 seq_putc(seq, '0'); 1158 1159 seq_putc(seq, '\n'); 1160 } 1161 1162 static int input_devices_seq_show(struct seq_file *seq, void *v) 1163 { 1164 struct input_dev *dev = container_of(v, struct input_dev, node); 1165 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); 1166 struct input_handle *handle; 1167 1168 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n", 1169 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); 1170 1171 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); 1172 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); 1173 seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); 1174 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : ""); 1175 seq_puts(seq, "H: Handlers="); 1176 1177 list_for_each_entry(handle, &dev->h_list, d_node) 1178 seq_printf(seq, "%s ", handle->name); 1179 seq_putc(seq, '\n'); 1180 1181 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX); 1182 1183 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX); 1184 if (test_bit(EV_KEY, dev->evbit)) 1185 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX); 1186 if (test_bit(EV_REL, dev->evbit)) 1187 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX); 1188 if (test_bit(EV_ABS, dev->evbit)) 1189 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX); 1190 if (test_bit(EV_MSC, dev->evbit)) 1191 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX); 1192 if (test_bit(EV_LED, dev->evbit)) 1193 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX); 1194 if (test_bit(EV_SND, dev->evbit)) 1195 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX); 1196 if (test_bit(EV_FF, dev->evbit)) 1197 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX); 1198 if (test_bit(EV_SW, dev->evbit)) 1199 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX); 1200 1201 seq_putc(seq, '\n'); 1202 1203 kfree(path); 1204 return 0; 1205 } 1206 1207 static const struct seq_operations input_devices_seq_ops = { 1208 .start = input_devices_seq_start, 1209 .next = input_devices_seq_next, 1210 .stop = input_seq_stop, 1211 .show = input_devices_seq_show, 1212 }; 1213 1214 static int input_proc_devices_open(struct inode *inode, struct file *file) 1215 { 1216 return seq_open(file, &input_devices_seq_ops); 1217 } 1218 1219 static const struct proc_ops input_devices_proc_ops = { 1220 .proc_open = input_proc_devices_open, 1221 .proc_poll = input_proc_devices_poll, 1222 .proc_read = seq_read, 1223 .proc_lseek = seq_lseek, 1224 .proc_release = seq_release, 1225 }; 1226 1227 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) 1228 { 1229 union input_seq_state *state = (union input_seq_state *)&seq->private; 1230 int error; 1231 1232 /* We need to fit into seq->private pointer */ 1233 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1234 1235 error = mutex_lock_interruptible(&input_mutex); 1236 if (error) { 1237 state->mutex_acquired = false; 1238 return ERR_PTR(error); 1239 } 1240 1241 state->mutex_acquired = true; 1242 state->pos = *pos; 1243 1244 return seq_list_start(&input_handler_list, *pos); 1245 } 1246 1247 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1248 { 1249 union input_seq_state *state = (union input_seq_state *)&seq->private; 1250 1251 state->pos = *pos + 1; 1252 return seq_list_next(v, &input_handler_list, pos); 1253 } 1254 1255 static int input_handlers_seq_show(struct seq_file *seq, void *v) 1256 { 1257 struct input_handler 
*handler = container_of(v, struct input_handler, node); 1258 union input_seq_state *state = (union input_seq_state *)&seq->private; 1259 1260 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); 1261 if (handler->filter) 1262 seq_puts(seq, " (filter)"); 1263 if (handler->legacy_minors) 1264 seq_printf(seq, " Minor=%d", handler->minor); 1265 seq_putc(seq, '\n'); 1266 1267 return 0; 1268 } 1269 1270 static const struct seq_operations input_handlers_seq_ops = { 1271 .start = input_handlers_seq_start, 1272 .next = input_handlers_seq_next, 1273 .stop = input_seq_stop, 1274 .show = input_handlers_seq_show, 1275 }; 1276 1277 static int input_proc_handlers_open(struct inode *inode, struct file *file) 1278 { 1279 return seq_open(file, &input_handlers_seq_ops); 1280 } 1281 1282 static const struct proc_ops input_handlers_proc_ops = { 1283 .proc_open = input_proc_handlers_open, 1284 .proc_read = seq_read, 1285 .proc_lseek = seq_lseek, 1286 .proc_release = seq_release, 1287 }; 1288 1289 static int __init input_proc_init(void) 1290 { 1291 struct proc_dir_entry *entry; 1292 1293 proc_bus_input_dir = proc_mkdir("bus/input", NULL); 1294 if (!proc_bus_input_dir) 1295 return -ENOMEM; 1296 1297 entry = proc_create("devices", 0, proc_bus_input_dir, 1298 &input_devices_proc_ops); 1299 if (!entry) 1300 goto fail1; 1301 1302 entry = proc_create("handlers", 0, proc_bus_input_dir, 1303 &input_handlers_proc_ops); 1304 if (!entry) 1305 goto fail2; 1306 1307 return 0; 1308 1309 fail2: remove_proc_entry("devices", proc_bus_input_dir); 1310 fail1: remove_proc_entry("bus/input", NULL); 1311 return -ENOMEM; 1312 } 1313 1314 static void input_proc_exit(void) 1315 { 1316 remove_proc_entry("devices", proc_bus_input_dir); 1317 remove_proc_entry("handlers", proc_bus_input_dir); 1318 remove_proc_entry("bus/input", NULL); 1319 } 1320 1321 #else /* !CONFIG_PROC_FS */ 1322 static inline void input_wakeup_procfs_readers(void) { } 1323 static inline int input_proc_init(void) { return 0; } 1324 static inline void input_proc_exit(void) { } 1325 #endif 1326 1327 #define INPUT_DEV_STRING_ATTR_SHOW(name) \ 1328 static ssize_t input_dev_show_##name(struct device *dev, \ 1329 struct device_attribute *attr, \ 1330 char *buf) \ 1331 { \ 1332 struct input_dev *input_dev = to_input_dev(dev); \ 1333 \ 1334 return scnprintf(buf, PAGE_SIZE, "%s\n", \ 1335 input_dev->name ? 
input_dev->name : ""); \ 1336 } \ 1337 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL) 1338 1339 INPUT_DEV_STRING_ATTR_SHOW(name); 1340 INPUT_DEV_STRING_ATTR_SHOW(phys); 1341 INPUT_DEV_STRING_ATTR_SHOW(uniq); 1342 1343 static int input_print_modalias_bits(char *buf, int size, 1344 char name, unsigned long *bm, 1345 unsigned int min_bit, unsigned int max_bit) 1346 { 1347 int len = 0, i; 1348 1349 len += snprintf(buf, max(size, 0), "%c", name); 1350 for (i = min_bit; i < max_bit; i++) 1351 if (bm[BIT_WORD(i)] & BIT_MASK(i)) 1352 len += snprintf(buf + len, max(size - len, 0), "%X,", i); 1353 return len; 1354 } 1355 1356 static int input_print_modalias(char *buf, int size, struct input_dev *id, 1357 int add_cr) 1358 { 1359 int len; 1360 1361 len = snprintf(buf, max(size, 0), 1362 "input:b%04Xv%04Xp%04Xe%04X-", 1363 id->id.bustype, id->id.vendor, 1364 id->id.product, id->id.version); 1365 1366 len += input_print_modalias_bits(buf + len, size - len, 1367 'e', id->evbit, 0, EV_MAX); 1368 len += input_print_modalias_bits(buf + len, size - len, 1369 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX); 1370 len += input_print_modalias_bits(buf + len, size - len, 1371 'r', id->relbit, 0, REL_MAX); 1372 len += input_print_modalias_bits(buf + len, size - len, 1373 'a', id->absbit, 0, ABS_MAX); 1374 len += input_print_modalias_bits(buf + len, size - len, 1375 'm', id->mscbit, 0, MSC_MAX); 1376 len += input_print_modalias_bits(buf + len, size - len, 1377 'l', id->ledbit, 0, LED_MAX); 1378 len += input_print_modalias_bits(buf + len, size - len, 1379 's', id->sndbit, 0, SND_MAX); 1380 len += input_print_modalias_bits(buf + len, size - len, 1381 'f', id->ffbit, 0, FF_MAX); 1382 len += input_print_modalias_bits(buf + len, size - len, 1383 'w', id->swbit, 0, SW_MAX); 1384 1385 if (add_cr) 1386 len += snprintf(buf + len, max(size - len, 0), "\n"); 1387 1388 return len; 1389 } 1390 1391 static ssize_t input_dev_show_modalias(struct device *dev, 1392 struct device_attribute *attr, 1393 char *buf) 1394 { 1395 struct input_dev *id = to_input_dev(dev); 1396 ssize_t len; 1397 1398 len = input_print_modalias(buf, PAGE_SIZE, id, 1); 1399 1400 return min_t(int, len, PAGE_SIZE); 1401 } 1402 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); 1403 1404 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1405 int max, int add_cr); 1406 1407 static ssize_t input_dev_show_properties(struct device *dev, 1408 struct device_attribute *attr, 1409 char *buf) 1410 { 1411 struct input_dev *input_dev = to_input_dev(dev); 1412 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit, 1413 INPUT_PROP_MAX, true); 1414 return min_t(int, len, PAGE_SIZE); 1415 } 1416 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL); 1417 1418 static struct attribute *input_dev_attrs[] = { 1419 &dev_attr_name.attr, 1420 &dev_attr_phys.attr, 1421 &dev_attr_uniq.attr, 1422 &dev_attr_modalias.attr, 1423 &dev_attr_properties.attr, 1424 NULL 1425 }; 1426 1427 static const struct attribute_group input_dev_attr_group = { 1428 .attrs = input_dev_attrs, 1429 }; 1430 1431 #define INPUT_DEV_ID_ATTR(name) \ 1432 static ssize_t input_dev_show_id_##name(struct device *dev, \ 1433 struct device_attribute *attr, \ 1434 char *buf) \ 1435 { \ 1436 struct input_dev *input_dev = to_input_dev(dev); \ 1437 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \ 1438 } \ 1439 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL) 1440 1441 INPUT_DEV_ID_ATTR(bustype); 1442 
INPUT_DEV_ID_ATTR(vendor); 1443 INPUT_DEV_ID_ATTR(product); 1444 INPUT_DEV_ID_ATTR(version); 1445 1446 static struct attribute *input_dev_id_attrs[] = { 1447 &dev_attr_bustype.attr, 1448 &dev_attr_vendor.attr, 1449 &dev_attr_product.attr, 1450 &dev_attr_version.attr, 1451 NULL 1452 }; 1453 1454 static const struct attribute_group input_dev_id_attr_group = { 1455 .name = "id", 1456 .attrs = input_dev_id_attrs, 1457 }; 1458 1459 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1460 int max, int add_cr) 1461 { 1462 int i; 1463 int len = 0; 1464 bool skip_empty = true; 1465 1466 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1467 len += input_bits_to_string(buf + len, max(buf_size - len, 0), 1468 bitmap[i], skip_empty); 1469 if (len) { 1470 skip_empty = false; 1471 if (i > 0) 1472 len += snprintf(buf + len, max(buf_size - len, 0), " "); 1473 } 1474 } 1475 1476 /* 1477 * If no output was produced print a single 0. 1478 */ 1479 if (len == 0) 1480 len = snprintf(buf, buf_size, "%d", 0); 1481 1482 if (add_cr) 1483 len += snprintf(buf + len, max(buf_size - len, 0), "\n"); 1484 1485 return len; 1486 } 1487 1488 #define INPUT_DEV_CAP_ATTR(ev, bm) \ 1489 static ssize_t input_dev_show_cap_##bm(struct device *dev, \ 1490 struct device_attribute *attr, \ 1491 char *buf) \ 1492 { \ 1493 struct input_dev *input_dev = to_input_dev(dev); \ 1494 int len = input_print_bitmap(buf, PAGE_SIZE, \ 1495 input_dev->bm##bit, ev##_MAX, \ 1496 true); \ 1497 return min_t(int, len, PAGE_SIZE); \ 1498 } \ 1499 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL) 1500 1501 INPUT_DEV_CAP_ATTR(EV, ev); 1502 INPUT_DEV_CAP_ATTR(KEY, key); 1503 INPUT_DEV_CAP_ATTR(REL, rel); 1504 INPUT_DEV_CAP_ATTR(ABS, abs); 1505 INPUT_DEV_CAP_ATTR(MSC, msc); 1506 INPUT_DEV_CAP_ATTR(LED, led); 1507 INPUT_DEV_CAP_ATTR(SND, snd); 1508 INPUT_DEV_CAP_ATTR(FF, ff); 1509 INPUT_DEV_CAP_ATTR(SW, sw); 1510 1511 static struct attribute *input_dev_caps_attrs[] = { 1512 &dev_attr_ev.attr, 1513 &dev_attr_key.attr, 1514 &dev_attr_rel.attr, 1515 &dev_attr_abs.attr, 1516 &dev_attr_msc.attr, 1517 &dev_attr_led.attr, 1518 &dev_attr_snd.attr, 1519 &dev_attr_ff.attr, 1520 &dev_attr_sw.attr, 1521 NULL 1522 }; 1523 1524 static const struct attribute_group input_dev_caps_attr_group = { 1525 .name = "capabilities", 1526 .attrs = input_dev_caps_attrs, 1527 }; 1528 1529 static const struct attribute_group *input_dev_attr_groups[] = { 1530 &input_dev_attr_group, 1531 &input_dev_id_attr_group, 1532 &input_dev_caps_attr_group, 1533 &input_poller_attribute_group, 1534 NULL 1535 }; 1536 1537 static void input_dev_release(struct device *device) 1538 { 1539 struct input_dev *dev = to_input_dev(device); 1540 1541 input_ff_destroy(dev); 1542 input_mt_destroy_slots(dev); 1543 kfree(dev->poller); 1544 kfree(dev->absinfo); 1545 kfree(dev->vals); 1546 kfree(dev); 1547 1548 module_put(THIS_MODULE); 1549 } 1550 1551 /* 1552 * Input uevent interface - loading event handlers based on 1553 * device bitfields. 
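 *
 * For a keyboard-like device the variables constructed below might look
 * roughly like this (values are purely illustrative):
 *
 *	PRODUCT=3/1/1/100
 *	NAME="Example Keyboard"
 *	EV=120013
 *	MODALIAS=input:b0003v0001p0001e0100-e0,1,4,11,14,k71,72,...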
1554 */ 1555 static int input_add_uevent_bm_var(struct kobj_uevent_env *env, 1556 const char *name, unsigned long *bitmap, int max) 1557 { 1558 int len; 1559 1560 if (add_uevent_var(env, "%s", name)) 1561 return -ENOMEM; 1562 1563 len = input_print_bitmap(&env->buf[env->buflen - 1], 1564 sizeof(env->buf) - env->buflen, 1565 bitmap, max, false); 1566 if (len >= (sizeof(env->buf) - env->buflen)) 1567 return -ENOMEM; 1568 1569 env->buflen += len; 1570 return 0; 1571 } 1572 1573 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, 1574 struct input_dev *dev) 1575 { 1576 int len; 1577 1578 if (add_uevent_var(env, "MODALIAS=")) 1579 return -ENOMEM; 1580 1581 len = input_print_modalias(&env->buf[env->buflen - 1], 1582 sizeof(env->buf) - env->buflen, 1583 dev, 0); 1584 if (len >= (sizeof(env->buf) - env->buflen)) 1585 return -ENOMEM; 1586 1587 env->buflen += len; 1588 return 0; 1589 } 1590 1591 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \ 1592 do { \ 1593 int err = add_uevent_var(env, fmt, val); \ 1594 if (err) \ 1595 return err; \ 1596 } while (0) 1597 1598 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \ 1599 do { \ 1600 int err = input_add_uevent_bm_var(env, name, bm, max); \ 1601 if (err) \ 1602 return err; \ 1603 } while (0) 1604 1605 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \ 1606 do { \ 1607 int err = input_add_uevent_modalias_var(env, dev); \ 1608 if (err) \ 1609 return err; \ 1610 } while (0) 1611 1612 static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) 1613 { 1614 struct input_dev *dev = to_input_dev(device); 1615 1616 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", 1617 dev->id.bustype, dev->id.vendor, 1618 dev->id.product, dev->id.version); 1619 if (dev->name) 1620 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name); 1621 if (dev->phys) 1622 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys); 1623 if (dev->uniq) 1624 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq); 1625 1626 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX); 1627 1628 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX); 1629 if (test_bit(EV_KEY, dev->evbit)) 1630 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX); 1631 if (test_bit(EV_REL, dev->evbit)) 1632 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX); 1633 if (test_bit(EV_ABS, dev->evbit)) 1634 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX); 1635 if (test_bit(EV_MSC, dev->evbit)) 1636 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX); 1637 if (test_bit(EV_LED, dev->evbit)) 1638 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX); 1639 if (test_bit(EV_SND, dev->evbit)) 1640 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX); 1641 if (test_bit(EV_FF, dev->evbit)) 1642 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX); 1643 if (test_bit(EV_SW, dev->evbit)) 1644 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX); 1645 1646 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev); 1647 1648 return 0; 1649 } 1650 1651 #define INPUT_DO_TOGGLE(dev, type, bits, on) \ 1652 do { \ 1653 int i; \ 1654 bool active; \ 1655 \ 1656 if (!test_bit(EV_##type, dev->evbit)) \ 1657 break; \ 1658 \ 1659 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \ 1660 active = test_bit(i, dev->bits); \ 1661 if (!active && !on) \ 1662 continue; \ 1663 \ 1664 dev->event(dev, EV_##type, i, on ? 
active : 0); \
		}						\
	} while (0)

static void input_dev_toggle(struct input_dev *dev, bool activate)
{
	if (!dev->event)
		return;

	INPUT_DO_TOGGLE(dev, LED, led, activate);
	INPUT_DO_TOGGLE(dev, SND, snd, activate);

	if (activate && test_bit(EV_REP, dev->evbit)) {
		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
	}
}

/**
 * input_reset_device() - reset/restore the state of input device
 * @dev: input device whose state needs to be reset
 *
 * This function tries to reset the state of an opened input device and
 * bring internal state and state of the hardware in sync with each other.
 * We mark all keys as released, restore LED state, repeat rate, etc.
 */
void input_reset_device(struct input_dev *dev)
{
	unsigned long flags;

	mutex_lock(&dev->mutex);
	spin_lock_irqsave(&dev->event_lock, flags);

	input_dev_toggle(dev, true);
	input_dev_release_keys(dev);

	spin_unlock_irqrestore(&dev->event_lock, flags);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);

#ifdef CONFIG_PM_SLEEP
static int input_dev_suspend(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	input_dev_release_keys(input_dev);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_resume(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Restore state of LEDs and sounds, if any were active. */
	input_dev_toggle(input_dev, true);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_freeze(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	input_dev_release_keys(input_dev);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_poweroff(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Turn off LEDs and sounds, if any are active.
*/ 1764 input_dev_toggle(input_dev, false); 1765 1766 spin_unlock_irq(&input_dev->event_lock); 1767 1768 return 0; 1769 } 1770 1771 static const struct dev_pm_ops input_dev_pm_ops = { 1772 .suspend = input_dev_suspend, 1773 .resume = input_dev_resume, 1774 .freeze = input_dev_freeze, 1775 .poweroff = input_dev_poweroff, 1776 .restore = input_dev_resume, 1777 }; 1778 #endif /* CONFIG_PM */ 1779 1780 static const struct device_type input_dev_type = { 1781 .groups = input_dev_attr_groups, 1782 .release = input_dev_release, 1783 .uevent = input_dev_uevent, 1784 #ifdef CONFIG_PM_SLEEP 1785 .pm = &input_dev_pm_ops, 1786 #endif 1787 }; 1788 1789 static char *input_devnode(struct device *dev, umode_t *mode) 1790 { 1791 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); 1792 } 1793 1794 struct class input_class = { 1795 .name = "input", 1796 .devnode = input_devnode, 1797 }; 1798 EXPORT_SYMBOL_GPL(input_class); 1799 1800 /** 1801 * input_allocate_device - allocate memory for new input device 1802 * 1803 * Returns prepared struct input_dev or %NULL. 1804 * 1805 * NOTE: Use input_free_device() to free devices that have not been 1806 * registered; input_unregister_device() should be used for already 1807 * registered devices. 1808 */ 1809 struct input_dev *input_allocate_device(void) 1810 { 1811 static atomic_t input_no = ATOMIC_INIT(-1); 1812 struct input_dev *dev; 1813 1814 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1815 if (dev) { 1816 dev->dev.type = &input_dev_type; 1817 dev->dev.class = &input_class; 1818 device_initialize(&dev->dev); 1819 mutex_init(&dev->mutex); 1820 spin_lock_init(&dev->event_lock); 1821 timer_setup(&dev->timer, NULL, 0); 1822 INIT_LIST_HEAD(&dev->h_list); 1823 INIT_LIST_HEAD(&dev->node); 1824 1825 dev_set_name(&dev->dev, "input%lu", 1826 (unsigned long)atomic_inc_return(&input_no)); 1827 1828 __module_get(THIS_MODULE); 1829 } 1830 1831 return dev; 1832 } 1833 EXPORT_SYMBOL(input_allocate_device); 1834 1835 struct input_devres { 1836 struct input_dev *input; 1837 }; 1838 1839 static int devm_input_device_match(struct device *dev, void *res, void *data) 1840 { 1841 struct input_devres *devres = res; 1842 1843 return devres->input == data; 1844 } 1845 1846 static void devm_input_device_release(struct device *dev, void *res) 1847 { 1848 struct input_devres *devres = res; 1849 struct input_dev *input = devres->input; 1850 1851 dev_dbg(dev, "%s: dropping reference to %s\n", 1852 __func__, dev_name(&input->dev)); 1853 input_put_device(input); 1854 } 1855 1856 /** 1857 * devm_input_allocate_device - allocate managed input device 1858 * @dev: device owning the input device being created 1859 * 1860 * Returns prepared struct input_dev or %NULL. 1861 * 1862 * Managed input devices do not need to be explicitly unregistered or 1863 * freed as it will be done automatically when owner device unbinds from 1864 * its driver (or binding fails). Once managed input device is allocated, 1865 * it is ready to be set up and registered in the same fashion as regular 1866 * input device. There are no special devm_input_device_[un]register() 1867 * variants, regular ones work with both managed and unmanaged devices, 1868 * should you need them. In most cases however, managed input device need 1869 * not be explicitly unregistered or freed. 1870 * 1871 * NOTE: the owner device is set up as parent of input device and users 1872 * should not override it. 
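 *
 * A minimal usage sketch from a driver's probe() routine (illustrative
 * only; foo_probe(), the device name and the event code are made up):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *		int error;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "foo button";
 *		input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *		error = input_register_device(input);
 *		if (error)
 *			return error;
 *
 *		return 0;
 *	}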
1873 */ 1874 struct input_dev *devm_input_allocate_device(struct device *dev) 1875 { 1876 struct input_dev *input; 1877 struct input_devres *devres; 1878 1879 devres = devres_alloc(devm_input_device_release, 1880 sizeof(*devres), GFP_KERNEL); 1881 if (!devres) 1882 return NULL; 1883 1884 input = input_allocate_device(); 1885 if (!input) { 1886 devres_free(devres); 1887 return NULL; 1888 } 1889 1890 input->dev.parent = dev; 1891 input->devres_managed = true; 1892 1893 devres->input = input; 1894 devres_add(dev, devres); 1895 1896 return input; 1897 } 1898 EXPORT_SYMBOL(devm_input_allocate_device); 1899 1900 /** 1901 * input_free_device - free memory occupied by input_dev structure 1902 * @dev: input device to free 1903 * 1904 * This function should only be used if input_register_device() 1905 * was not called yet or if it failed. Once device was registered 1906 * use input_unregister_device() and memory will be freed once last 1907 * reference to the device is dropped. 1908 * 1909 * Device should be allocated by input_allocate_device(). 1910 * 1911 * NOTE: If there are references to the input device then memory 1912 * will not be freed until last reference is dropped. 1913 */ 1914 void input_free_device(struct input_dev *dev) 1915 { 1916 if (dev) { 1917 if (dev->devres_managed) 1918 WARN_ON(devres_destroy(dev->dev.parent, 1919 devm_input_device_release, 1920 devm_input_device_match, 1921 dev)); 1922 input_put_device(dev); 1923 } 1924 } 1925 EXPORT_SYMBOL(input_free_device); 1926 1927 /** 1928 * input_set_timestamp - set timestamp for input events 1929 * @dev: input device to set timestamp for 1930 * @timestamp: the time at which the event has occurred 1931 * in CLOCK_MONOTONIC 1932 * 1933 * This function is intended to provide to the input system a more 1934 * accurate time of when an event actually occurred. The driver should 1935 * call this function as soon as a timestamp is acquired ensuring 1936 * clock conversions in input_set_timestamp are done correctly. 1937 * 1938 * The system entering suspend state between timestamp acquisition and 1939 * calling input_set_timestamp can result in inaccurate conversions. 1940 */ 1941 void input_set_timestamp(struct input_dev *dev, ktime_t timestamp) 1942 { 1943 dev->timestamp[INPUT_CLK_MONO] = timestamp; 1944 dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp); 1945 dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp, 1946 TK_OFFS_BOOT); 1947 } 1948 EXPORT_SYMBOL(input_set_timestamp); 1949 1950 /** 1951 * input_get_timestamp - get timestamp for input events 1952 * @dev: input device to get timestamp from 1953 * 1954 * A valid timestamp is a timestamp of non-zero value. 1955 */ 1956 ktime_t *input_get_timestamp(struct input_dev *dev) 1957 { 1958 const ktime_t invalid_timestamp = ktime_set(0, 0); 1959 1960 if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp)) 1961 input_set_timestamp(dev, ktime_get()); 1962 1963 return dev->timestamp; 1964 } 1965 EXPORT_SYMBOL(input_get_timestamp); 1966 1967 /** 1968 * input_set_capability - mark device as capable of a certain event 1969 * @dev: device that is capable of emitting or accepting event 1970 * @type: type of the event (EV_KEY, EV_REL, etc...) 1971 * @code: event code 1972 * 1973 * In addition to setting up corresponding bit in appropriate capability 1974 * bitmap the function also adjusts dev->evbit. 
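 *
 * For example (illustrative only), a driver reporting a power button and
 * a lid switch would typically do:
 *
 *	input_set_capability(dev, EV_KEY, KEY_POWER);
 *	input_set_capability(dev, EV_SW, SW_LID);
 *
 * For EV_ABS axes drivers usually call input_set_abs_params() instead so
 * that the axis range is set up as well.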
1975 */ 1976 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) 1977 { 1978 switch (type) { 1979 case EV_KEY: 1980 __set_bit(code, dev->keybit); 1981 break; 1982 1983 case EV_REL: 1984 __set_bit(code, dev->relbit); 1985 break; 1986 1987 case EV_ABS: 1988 input_alloc_absinfo(dev); 1989 if (!dev->absinfo) 1990 return; 1991 1992 __set_bit(code, dev->absbit); 1993 break; 1994 1995 case EV_MSC: 1996 __set_bit(code, dev->mscbit); 1997 break; 1998 1999 case EV_SW: 2000 __set_bit(code, dev->swbit); 2001 break; 2002 2003 case EV_LED: 2004 __set_bit(code, dev->ledbit); 2005 break; 2006 2007 case EV_SND: 2008 __set_bit(code, dev->sndbit); 2009 break; 2010 2011 case EV_FF: 2012 __set_bit(code, dev->ffbit); 2013 break; 2014 2015 case EV_PWR: 2016 /* do nothing */ 2017 break; 2018 2019 default: 2020 pr_err("%s: unknown type %u (code %u)\n", __func__, type, code); 2021 dump_stack(); 2022 return; 2023 } 2024 2025 __set_bit(type, dev->evbit); 2026 } 2027 EXPORT_SYMBOL(input_set_capability); 2028 2029 static unsigned int input_estimate_events_per_packet(struct input_dev *dev) 2030 { 2031 int mt_slots; 2032 int i; 2033 unsigned int events; 2034 2035 if (dev->mt) { 2036 mt_slots = dev->mt->num_slots; 2037 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { 2038 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - 2039 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, 2040 mt_slots = clamp(mt_slots, 2, 32); 2041 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 2042 mt_slots = 2; 2043 } else { 2044 mt_slots = 0; 2045 } 2046 2047 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ 2048 2049 if (test_bit(EV_ABS, dev->evbit)) 2050 for_each_set_bit(i, dev->absbit, ABS_CNT) 2051 events += input_is_mt_axis(i) ? mt_slots : 1; 2052 2053 if (test_bit(EV_REL, dev->evbit)) 2054 events += bitmap_weight(dev->relbit, REL_CNT); 2055 2056 /* Make room for KEY and MSC events */ 2057 events += 7; 2058 2059 return events; 2060 } 2061 2062 #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ 2063 do { \ 2064 if (!test_bit(EV_##type, dev->evbit)) \ 2065 memset(dev->bits##bit, 0, \ 2066 sizeof(dev->bits##bit)); \ 2067 } while (0) 2068 2069 static void input_cleanse_bitmasks(struct input_dev *dev) 2070 { 2071 INPUT_CLEANSE_BITMASK(dev, KEY, key); 2072 INPUT_CLEANSE_BITMASK(dev, REL, rel); 2073 INPUT_CLEANSE_BITMASK(dev, ABS, abs); 2074 INPUT_CLEANSE_BITMASK(dev, MSC, msc); 2075 INPUT_CLEANSE_BITMASK(dev, LED, led); 2076 INPUT_CLEANSE_BITMASK(dev, SND, snd); 2077 INPUT_CLEANSE_BITMASK(dev, FF, ff); 2078 INPUT_CLEANSE_BITMASK(dev, SW, sw); 2079 } 2080 2081 static void __input_unregister_device(struct input_dev *dev) 2082 { 2083 struct input_handle *handle, *next; 2084 2085 input_disconnect_device(dev); 2086 2087 mutex_lock(&input_mutex); 2088 2089 list_for_each_entry_safe(handle, next, &dev->h_list, d_node) 2090 handle->handler->disconnect(handle); 2091 WARN_ON(!list_empty(&dev->h_list)); 2092 2093 del_timer_sync(&dev->timer); 2094 list_del_init(&dev->node); 2095 2096 input_wakeup_procfs_readers(); 2097 2098 mutex_unlock(&input_mutex); 2099 2100 device_del(&dev->dev); 2101 } 2102 2103 static void devm_input_device_unregister(struct device *dev, void *res) 2104 { 2105 struct input_devres *devres = res; 2106 struct input_dev *input = devres->input; 2107 2108 dev_dbg(dev, "%s: unregistering device %s\n", 2109 __func__, dev_name(&input->dev)); 2110 __input_unregister_device(input); 2111 } 2112 2113 /** 2114 * input_enable_softrepeat - enable software autorepeat 2115 * @dev: input device 
static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
				sizeof(dev->bits##bit));		\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));

	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);

/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers the device with the input core. The device
 * must be allocated with input_allocate_device() and all of its
 * capabilities set up before registering.
 * If this function fails, the device must be freed with
 * input_free_device().
 * Once the device has been successfully registered it can be
 * unregistered with input_unregister_device(); input_free_device()
 * should not be called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed; their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * a registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of the managed input device
 * will happen later, when the devres stack is unwound to the point where
 * the device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	unsigned int packet_size;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	dev->max_vals = dev->hint_events_per_packet + 2;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		error = -ENOMEM;
		goto err_devres_free;
	}

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	if (dev->poller)
		input_dev_poller_finalize(dev->poller);

	error = device_add(&dev->dev);
	if (error)
		goto err_free_vals;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;

	list_add_tail(&dev->node, &input_dev_list);

	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_free_vals:
	kfree(dev->vals);
	dev->vals = NULL;
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);

/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once the device has been
 * unregistered the caller should not try to access it, as it may get
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
				       devm_input_device_unregister,
				       devm_input_device_match,
				       dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be
		 * done when the second devres entry fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;

	INIT_LIST_HEAD(&handler->h_list);

	list_add_tail(&handler->node, &input_handler_list);

	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);
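/*
 * Illustrative sketch (not part of this file): a minimal input handler
 * that matches every device and logs key events. The "foo_*" names are
 * assumptions for the example only; a real handler would also provide
 * connect()/disconnect() implementations such as the ones sketched
 * after input_register_handle() below.
 *
 *	static void foo_event(struct input_handle *handle,
 *			      unsigned int type, unsigned int code, int value)
 *	{
 *		if (type == EV_KEY)
 *			pr_debug("foo: key %u -> %d\n", code, value);
 *	}
 *
 *	static const struct input_device_id foo_ids[] = {
 *		{ .driver_info = 1 },	// matches all devices
 *		{ },
 *	};
 *
 *	static struct input_handler foo_handler = {
 *		.event		= foo_event,
 *		.connect	= foo_connect,
 *		.disconnect	= foo_disconnect,
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *	};
 *
 *	static int __init foo_init(void)
 *	{
 *		return input_register_handler(&foo_handler);
 *	}
 */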
/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from the lists of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles, calling @fn for each and
 * passing it @data, and stop when @fn returns a non-zero value. The
 * function uses RCU to traverse the list and therefore may be used in
 * atomic contexts. The @fn callback is invoked from an RCU critical
 * section and thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}

	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);

/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto the device's
 * and handler's lists so that events can flow through
 * it once it is opened using input_open_device().
 *
 * This function is supposed to be called from the handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;

	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;

	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);

	mutex_unlock(&dev->mutex);

	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so a separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);
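/*
 * Illustrative sketch (not part of this file): a handler's connect()
 * and disconnect() methods typically pair input_register_handle() /
 * input_open_device() with input_close_device() /
 * input_unregister_handle(), unwinding in reverse order on error.
 * The "foo_*" names are assumptions for the example only.
 *
 *	static int foo_connect(struct input_handler *handler,
 *			       struct input_dev *dev,
 *			       const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "foo";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	err_unregister_handle:
 *		input_unregister_handle(handle);
 *	err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 *
 *	static void foo_disconnect(struct input_handle *handle)
 *	{
 *		input_close_device(handle);
 *		input_unregister_handle(handle);
 *		kfree(handle);
 *	}
 */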
/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes the input handle from the device's
 * and handler's lists.
 *
 * This function is supposed to be called from the handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);

/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of the legacy range
 * @allow_dynamic: whether we can also take an ID from the dynamic range
 *
 * This function allocates a new device minor from the input major's
 * namespace. The caller can request a legacy minor by specifying the
 * @legacy_base and @legacy_num parameters, and whether an ID can be
 * allocated from the dynamic range if there are no free IDs in the
 * legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_simple_get(&input_ida,
					   legacy_base,
					   legacy_base + legacy_num,
					   GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_simple_get(&input_ida,
			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
			      GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);

/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases a previously allocated input minor so that
 * it can be reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_simple_remove(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);
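/*
 * Illustrative sketch (not part of this file): a character-device based
 * handler's connect() might reserve a minor in its legacy range, falling
 * back to the dynamic range, and release it again on disconnect. The
 * names FOO_MINOR_BASE and FOO_MINORS are assumptions for the example
 * only.
 *
 *	#define FOO_MINOR_BASE	64
 *	#define FOO_MINORS	32
 *
 *	...
 *	int minor;
 *
 *	minor = input_get_new_minor(FOO_MINOR_BASE, FOO_MINORS, true);
 *	if (minor < 0)
 *		return minor;
 *	...
 *	// on disconnect, or when unwinding after an error:
 *	input_free_minor(minor);
 */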