1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * The input core 4 * 5 * Copyright (c) 1999-2002 Vojtech Pavlik 6 */ 7 8 9 #define pr_fmt(fmt) KBUILD_BASENAME ": " fmt 10 11 #include <linux/init.h> 12 #include <linux/types.h> 13 #include <linux/idr.h> 14 #include <linux/input/mt.h> 15 #include <linux/module.h> 16 #include <linux/slab.h> 17 #include <linux/random.h> 18 #include <linux/major.h> 19 #include <linux/proc_fs.h> 20 #include <linux/sched.h> 21 #include <linux/seq_file.h> 22 #include <linux/poll.h> 23 #include <linux/device.h> 24 #include <linux/kstrtox.h> 25 #include <linux/mutex.h> 26 #include <linux/rcupdate.h> 27 #include "input-compat.h" 28 #include "input-core-private.h" 29 #include "input-poller.h" 30 31 MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>"); 32 MODULE_DESCRIPTION("Input core"); 33 MODULE_LICENSE("GPL"); 34 35 #define INPUT_MAX_CHAR_DEVICES 1024 36 #define INPUT_FIRST_DYNAMIC_DEV 256 37 static DEFINE_IDA(input_ida); 38 39 static LIST_HEAD(input_dev_list); 40 static LIST_HEAD(input_handler_list); 41 42 /* 43 * input_mutex protects access to both input_dev_list and input_handler_list. 44 * This also causes input_[un]register_device and input_[un]register_handler 45 * be mutually exclusive which simplifies locking in drivers implementing 46 * input handlers. 47 */ 48 static DEFINE_MUTEX(input_mutex); 49 50 static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 }; 51 52 static const unsigned int input_max_code[EV_CNT] = { 53 [EV_KEY] = KEY_MAX, 54 [EV_REL] = REL_MAX, 55 [EV_ABS] = ABS_MAX, 56 [EV_MSC] = MSC_MAX, 57 [EV_SW] = SW_MAX, 58 [EV_LED] = LED_MAX, 59 [EV_SND] = SND_MAX, 60 [EV_FF] = FF_MAX, 61 }; 62 63 static inline int is_event_supported(unsigned int code, 64 unsigned long *bm, unsigned int max) 65 { 66 return code <= max && test_bit(code, bm); 67 } 68 69 static int input_defuzz_abs_event(int value, int old_val, int fuzz) 70 { 71 if (fuzz) { 72 if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2) 73 return old_val; 74 75 if (value > old_val - fuzz && value < old_val + fuzz) 76 return (old_val * 3 + value) / 4; 77 78 if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2) 79 return (old_val + value) / 2; 80 } 81 82 return value; 83 } 84 85 static void input_start_autorepeat(struct input_dev *dev, int code) 86 { 87 if (test_bit(EV_REP, dev->evbit) && 88 dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] && 89 dev->timer.function) { 90 dev->repeat_key = code; 91 mod_timer(&dev->timer, 92 jiffies + msecs_to_jiffies(dev->rep[REP_DELAY])); 93 } 94 } 95 96 static void input_stop_autorepeat(struct input_dev *dev) 97 { 98 del_timer(&dev->timer); 99 } 100 101 /* 102 * Pass event first through all filters and then, if event has not been 103 * filtered out, through all open handles. This function is called with 104 * dev->event_lock held and interrupts disabled. 105 */ 106 static unsigned int input_to_handler(struct input_handle *handle, 107 struct input_value *vals, unsigned int count) 108 { 109 struct input_handler *handler = handle->handler; 110 struct input_value *end = vals; 111 struct input_value *v; 112 113 if (handler->filter) { 114 for (v = vals; v != vals + count; v++) { 115 if (handler->filter(handle, v->type, v->code, v->value)) 116 continue; 117 if (end != v) 118 *end = *v; 119 end++; 120 } 121 count = end - vals; 122 } 123 124 if (!count) 125 return 0; 126 127 if (handler->events) 128 handler->events(handle, vals, count); 129 else if (handler->event) 130 for (v = vals; v != vals + count; v++) 131 handler->event(handle, v->type, v->code, v->value); 132 133 return count; 134 } 135 136 /* 137 * Pass values first through all filters and then, if event has not been 138 * filtered out, through all open handles. This function is called with 139 * dev->event_lock held and interrupts disabled. 140 */ 141 static void input_pass_values(struct input_dev *dev, 142 struct input_value *vals, unsigned int count) 143 { 144 struct input_handle *handle; 145 struct input_value *v; 146 147 lockdep_assert_held(&dev->event_lock); 148 149 if (!count) 150 return; 151 152 rcu_read_lock(); 153 154 handle = rcu_dereference(dev->grab); 155 if (handle) { 156 count = input_to_handler(handle, vals, count); 157 } else { 158 list_for_each_entry_rcu(handle, &dev->h_list, d_node) 159 if (handle->open) { 160 count = input_to_handler(handle, vals, count); 161 if (!count) 162 break; 163 } 164 } 165 166 rcu_read_unlock(); 167 168 /* trigger auto repeat for key events */ 169 if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) { 170 for (v = vals; v != vals + count; v++) { 171 if (v->type == EV_KEY && v->value != 2) { 172 if (v->value) 173 input_start_autorepeat(dev, v->code); 174 else 175 input_stop_autorepeat(dev); 176 } 177 } 178 } 179 } 180 181 #define INPUT_IGNORE_EVENT 0 182 #define INPUT_PASS_TO_HANDLERS 1 183 #define INPUT_PASS_TO_DEVICE 2 184 #define INPUT_SLOT 4 185 #define INPUT_FLUSH 8 186 #define INPUT_PASS_TO_ALL (INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE) 187 188 static int input_handle_abs_event(struct input_dev *dev, 189 unsigned int code, int *pval) 190 { 191 struct input_mt *mt = dev->mt; 192 bool is_mt_event; 193 int *pold; 194 195 if (code == ABS_MT_SLOT) { 196 /* 197 * "Stage" the event; we'll flush it later, when we 198 * get actual touch data. 199 */ 200 if (mt && *pval >= 0 && *pval < mt->num_slots) 201 mt->slot = *pval; 202 203 return INPUT_IGNORE_EVENT; 204 } 205 206 is_mt_event = input_is_mt_value(code); 207 208 if (!is_mt_event) { 209 pold = &dev->absinfo[code].value; 210 } else if (mt) { 211 pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST]; 212 } else { 213 /* 214 * Bypass filtering for multi-touch events when 215 * not employing slots. 216 */ 217 pold = NULL; 218 } 219 220 if (pold) { 221 *pval = input_defuzz_abs_event(*pval, *pold, 222 dev->absinfo[code].fuzz); 223 if (*pold == *pval) 224 return INPUT_IGNORE_EVENT; 225 226 *pold = *pval; 227 } 228 229 /* Flush pending "slot" event */ 230 if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) { 231 input_abs_set_val(dev, ABS_MT_SLOT, mt->slot); 232 return INPUT_PASS_TO_HANDLERS | INPUT_SLOT; 233 } 234 235 return INPUT_PASS_TO_HANDLERS; 236 } 237 238 static int input_get_disposition(struct input_dev *dev, 239 unsigned int type, unsigned int code, int *pval) 240 { 241 int disposition = INPUT_IGNORE_EVENT; 242 int value = *pval; 243 244 /* filter-out events from inhibited devices */ 245 if (dev->inhibited) 246 return INPUT_IGNORE_EVENT; 247 248 switch (type) { 249 250 case EV_SYN: 251 switch (code) { 252 case SYN_CONFIG: 253 disposition = INPUT_PASS_TO_ALL; 254 break; 255 256 case SYN_REPORT: 257 disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH; 258 break; 259 case SYN_MT_REPORT: 260 disposition = INPUT_PASS_TO_HANDLERS; 261 break; 262 } 263 break; 264 265 case EV_KEY: 266 if (is_event_supported(code, dev->keybit, KEY_MAX)) { 267 268 /* auto-repeat bypasses state updates */ 269 if (value == 2) { 270 disposition = INPUT_PASS_TO_HANDLERS; 271 break; 272 } 273 274 if (!!test_bit(code, dev->key) != !!value) { 275 276 __change_bit(code, dev->key); 277 disposition = INPUT_PASS_TO_HANDLERS; 278 } 279 } 280 break; 281 282 case EV_SW: 283 if (is_event_supported(code, dev->swbit, SW_MAX) && 284 !!test_bit(code, dev->sw) != !!value) { 285 286 __change_bit(code, dev->sw); 287 disposition = INPUT_PASS_TO_HANDLERS; 288 } 289 break; 290 291 case EV_ABS: 292 if (is_event_supported(code, dev->absbit, ABS_MAX)) 293 disposition = input_handle_abs_event(dev, code, &value); 294 295 break; 296 297 case EV_REL: 298 if (is_event_supported(code, dev->relbit, REL_MAX) && value) 299 disposition = INPUT_PASS_TO_HANDLERS; 300 301 break; 302 303 case EV_MSC: 304 if (is_event_supported(code, dev->mscbit, MSC_MAX)) 305 disposition = INPUT_PASS_TO_ALL; 306 307 break; 308 309 case EV_LED: 310 if (is_event_supported(code, dev->ledbit, LED_MAX) && 311 !!test_bit(code, dev->led) != !!value) { 312 313 __change_bit(code, dev->led); 314 disposition = INPUT_PASS_TO_ALL; 315 } 316 break; 317 318 case EV_SND: 319 if (is_event_supported(code, dev->sndbit, SND_MAX)) { 320 321 if (!!test_bit(code, dev->snd) != !!value) 322 __change_bit(code, dev->snd); 323 disposition = INPUT_PASS_TO_ALL; 324 } 325 break; 326 327 case EV_REP: 328 if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) { 329 dev->rep[code] = value; 330 disposition = INPUT_PASS_TO_ALL; 331 } 332 break; 333 334 case EV_FF: 335 if (value >= 0) 336 disposition = INPUT_PASS_TO_ALL; 337 break; 338 339 case EV_PWR: 340 disposition = INPUT_PASS_TO_ALL; 341 break; 342 } 343 344 *pval = value; 345 return disposition; 346 } 347 348 static void input_event_dispose(struct input_dev *dev, int disposition, 349 unsigned int type, unsigned int code, int value) 350 { 351 if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event) 352 dev->event(dev, type, code, value); 353 354 if (!dev->vals) 355 return; 356 357 if (disposition & INPUT_PASS_TO_HANDLERS) { 358 struct input_value *v; 359 360 if (disposition & INPUT_SLOT) { 361 v = &dev->vals[dev->num_vals++]; 362 v->type = EV_ABS; 363 v->code = ABS_MT_SLOT; 364 v->value = dev->mt->slot; 365 } 366 367 v = &dev->vals[dev->num_vals++]; 368 v->type = type; 369 v->code = code; 370 v->value = value; 371 } 372 373 if (disposition & INPUT_FLUSH) { 374 if (dev->num_vals >= 2) 375 input_pass_values(dev, dev->vals, dev->num_vals); 376 dev->num_vals = 0; 377 /* 378 * Reset the timestamp on flush so we won't end up 379 * with a stale one. Note we only need to reset the 380 * monolithic one as we use its presence when deciding 381 * whether to generate a synthetic timestamp. 382 */ 383 dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0); 384 } else if (dev->num_vals >= dev->max_vals - 2) { 385 dev->vals[dev->num_vals++] = input_value_sync; 386 input_pass_values(dev, dev->vals, dev->num_vals); 387 dev->num_vals = 0; 388 } 389 } 390 391 void input_handle_event(struct input_dev *dev, 392 unsigned int type, unsigned int code, int value) 393 { 394 int disposition; 395 396 lockdep_assert_held(&dev->event_lock); 397 398 disposition = input_get_disposition(dev, type, code, &value); 399 if (disposition != INPUT_IGNORE_EVENT) { 400 if (type != EV_SYN) 401 add_input_randomness(type, code, value); 402 403 input_event_dispose(dev, disposition, type, code, value); 404 } 405 } 406 407 /** 408 * input_event() - report new input event 409 * @dev: device that generated the event 410 * @type: type of the event 411 * @code: event code 412 * @value: value of the event 413 * 414 * This function should be used by drivers implementing various input 415 * devices to report input events. See also input_inject_event(). 416 * 417 * NOTE: input_event() may be safely used right after input device was 418 * allocated with input_allocate_device(), even before it is registered 419 * with input_register_device(), but the event will not reach any of the 420 * input handlers. Such early invocation of input_event() may be used 421 * to 'seed' initial state of a switch or initial position of absolute 422 * axis, etc. 423 */ 424 void input_event(struct input_dev *dev, 425 unsigned int type, unsigned int code, int value) 426 { 427 unsigned long flags; 428 429 if (is_event_supported(type, dev->evbit, EV_MAX)) { 430 431 spin_lock_irqsave(&dev->event_lock, flags); 432 input_handle_event(dev, type, code, value); 433 spin_unlock_irqrestore(&dev->event_lock, flags); 434 } 435 } 436 EXPORT_SYMBOL(input_event); 437 438 /** 439 * input_inject_event() - send input event from input handler 440 * @handle: input handle to send event through 441 * @type: type of the event 442 * @code: event code 443 * @value: value of the event 444 * 445 * Similar to input_event() but will ignore event if device is 446 * "grabbed" and handle injecting event is not the one that owns 447 * the device. 448 */ 449 void input_inject_event(struct input_handle *handle, 450 unsigned int type, unsigned int code, int value) 451 { 452 struct input_dev *dev = handle->dev; 453 struct input_handle *grab; 454 unsigned long flags; 455 456 if (is_event_supported(type, dev->evbit, EV_MAX)) { 457 spin_lock_irqsave(&dev->event_lock, flags); 458 459 rcu_read_lock(); 460 grab = rcu_dereference(dev->grab); 461 if (!grab || grab == handle) 462 input_handle_event(dev, type, code, value); 463 rcu_read_unlock(); 464 465 spin_unlock_irqrestore(&dev->event_lock, flags); 466 } 467 } 468 EXPORT_SYMBOL(input_inject_event); 469 470 /** 471 * input_alloc_absinfo - allocates array of input_absinfo structs 472 * @dev: the input device emitting absolute events 473 * 474 * If the absinfo struct the caller asked for is already allocated, this 475 * functions will not do anything. 476 */ 477 void input_alloc_absinfo(struct input_dev *dev) 478 { 479 if (dev->absinfo) 480 return; 481 482 dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL); 483 if (!dev->absinfo) { 484 dev_err(dev->dev.parent ?: &dev->dev, 485 "%s: unable to allocate memory\n", __func__); 486 /* 487 * We will handle this allocation failure in 488 * input_register_device() when we refuse to register input 489 * device with ABS bits but without absinfo. 490 */ 491 } 492 } 493 EXPORT_SYMBOL(input_alloc_absinfo); 494 495 void input_set_abs_params(struct input_dev *dev, unsigned int axis, 496 int min, int max, int fuzz, int flat) 497 { 498 struct input_absinfo *absinfo; 499 500 __set_bit(EV_ABS, dev->evbit); 501 __set_bit(axis, dev->absbit); 502 503 input_alloc_absinfo(dev); 504 if (!dev->absinfo) 505 return; 506 507 absinfo = &dev->absinfo[axis]; 508 absinfo->minimum = min; 509 absinfo->maximum = max; 510 absinfo->fuzz = fuzz; 511 absinfo->flat = flat; 512 } 513 EXPORT_SYMBOL(input_set_abs_params); 514 515 /** 516 * input_copy_abs - Copy absinfo from one input_dev to another 517 * @dst: Destination input device to copy the abs settings to 518 * @dst_axis: ABS_* value selecting the destination axis 519 * @src: Source input device to copy the abs settings from 520 * @src_axis: ABS_* value selecting the source axis 521 * 522 * Set absinfo for the selected destination axis by copying it from 523 * the specified source input device's source axis. 524 * This is useful to e.g. setup a pen/stylus input-device for combined 525 * touchscreen/pen hardware where the pen uses the same coordinates as 526 * the touchscreen. 527 */ 528 void input_copy_abs(struct input_dev *dst, unsigned int dst_axis, 529 const struct input_dev *src, unsigned int src_axis) 530 { 531 /* src must have EV_ABS and src_axis set */ 532 if (WARN_ON(!(test_bit(EV_ABS, src->evbit) && 533 test_bit(src_axis, src->absbit)))) 534 return; 535 536 /* 537 * input_alloc_absinfo() may have failed for the source. Our caller is 538 * expected to catch this when registering the input devices, which may 539 * happen after the input_copy_abs() call. 540 */ 541 if (!src->absinfo) 542 return; 543 544 input_set_capability(dst, EV_ABS, dst_axis); 545 if (!dst->absinfo) 546 return; 547 548 dst->absinfo[dst_axis] = src->absinfo[src_axis]; 549 } 550 EXPORT_SYMBOL(input_copy_abs); 551 552 /** 553 * input_grab_device - grabs device for exclusive use 554 * @handle: input handle that wants to own the device 555 * 556 * When a device is grabbed by an input handle all events generated by 557 * the device are delivered only to this handle. Also events injected 558 * by other input handles are ignored while device is grabbed. 559 */ 560 int input_grab_device(struct input_handle *handle) 561 { 562 struct input_dev *dev = handle->dev; 563 int retval; 564 565 retval = mutex_lock_interruptible(&dev->mutex); 566 if (retval) 567 return retval; 568 569 if (dev->grab) { 570 retval = -EBUSY; 571 goto out; 572 } 573 574 rcu_assign_pointer(dev->grab, handle); 575 576 out: 577 mutex_unlock(&dev->mutex); 578 return retval; 579 } 580 EXPORT_SYMBOL(input_grab_device); 581 582 static void __input_release_device(struct input_handle *handle) 583 { 584 struct input_dev *dev = handle->dev; 585 struct input_handle *grabber; 586 587 grabber = rcu_dereference_protected(dev->grab, 588 lockdep_is_held(&dev->mutex)); 589 if (grabber == handle) { 590 rcu_assign_pointer(dev->grab, NULL); 591 /* Make sure input_pass_values() notices that grab is gone */ 592 synchronize_rcu(); 593 594 list_for_each_entry(handle, &dev->h_list, d_node) 595 if (handle->open && handle->handler->start) 596 handle->handler->start(handle); 597 } 598 } 599 600 /** 601 * input_release_device - release previously grabbed device 602 * @handle: input handle that owns the device 603 * 604 * Releases previously grabbed device so that other input handles can 605 * start receiving input events. Upon release all handlers attached 606 * to the device have their start() method called so they have a change 607 * to synchronize device state with the rest of the system. 608 */ 609 void input_release_device(struct input_handle *handle) 610 { 611 struct input_dev *dev = handle->dev; 612 613 mutex_lock(&dev->mutex); 614 __input_release_device(handle); 615 mutex_unlock(&dev->mutex); 616 } 617 EXPORT_SYMBOL(input_release_device); 618 619 /** 620 * input_open_device - open input device 621 * @handle: handle through which device is being accessed 622 * 623 * This function should be called by input handlers when they 624 * want to start receive events from given input device. 625 */ 626 int input_open_device(struct input_handle *handle) 627 { 628 struct input_dev *dev = handle->dev; 629 int retval; 630 631 retval = mutex_lock_interruptible(&dev->mutex); 632 if (retval) 633 return retval; 634 635 if (dev->going_away) { 636 retval = -ENODEV; 637 goto out; 638 } 639 640 handle->open++; 641 642 if (dev->users++ || dev->inhibited) { 643 /* 644 * Device is already opened and/or inhibited, 645 * so we can exit immediately and report success. 646 */ 647 goto out; 648 } 649 650 if (dev->open) { 651 retval = dev->open(dev); 652 if (retval) { 653 dev->users--; 654 handle->open--; 655 /* 656 * Make sure we are not delivering any more events 657 * through this handle 658 */ 659 synchronize_rcu(); 660 goto out; 661 } 662 } 663 664 if (dev->poller) 665 input_dev_poller_start(dev->poller); 666 667 out: 668 mutex_unlock(&dev->mutex); 669 return retval; 670 } 671 EXPORT_SYMBOL(input_open_device); 672 673 int input_flush_device(struct input_handle *handle, struct file *file) 674 { 675 struct input_dev *dev = handle->dev; 676 int retval; 677 678 retval = mutex_lock_interruptible(&dev->mutex); 679 if (retval) 680 return retval; 681 682 if (dev->flush) 683 retval = dev->flush(dev, file); 684 685 mutex_unlock(&dev->mutex); 686 return retval; 687 } 688 EXPORT_SYMBOL(input_flush_device); 689 690 /** 691 * input_close_device - close input device 692 * @handle: handle through which device is being accessed 693 * 694 * This function should be called by input handlers when they 695 * want to stop receive events from given input device. 696 */ 697 void input_close_device(struct input_handle *handle) 698 { 699 struct input_dev *dev = handle->dev; 700 701 mutex_lock(&dev->mutex); 702 703 __input_release_device(handle); 704 705 if (!dev->inhibited && !--dev->users) { 706 if (dev->poller) 707 input_dev_poller_stop(dev->poller); 708 if (dev->close) 709 dev->close(dev); 710 } 711 712 if (!--handle->open) { 713 /* 714 * synchronize_rcu() makes sure that input_pass_values() 715 * completed and that no more input events are delivered 716 * through this handle 717 */ 718 synchronize_rcu(); 719 } 720 721 mutex_unlock(&dev->mutex); 722 } 723 EXPORT_SYMBOL(input_close_device); 724 725 /* 726 * Simulate keyup events for all keys that are marked as pressed. 727 * The function must be called with dev->event_lock held. 728 */ 729 static bool input_dev_release_keys(struct input_dev *dev) 730 { 731 bool need_sync = false; 732 int code; 733 734 lockdep_assert_held(&dev->event_lock); 735 736 if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) { 737 for_each_set_bit(code, dev->key, KEY_CNT) { 738 input_handle_event(dev, EV_KEY, code, 0); 739 need_sync = true; 740 } 741 } 742 743 return need_sync; 744 } 745 746 /* 747 * Prepare device for unregistering 748 */ 749 static void input_disconnect_device(struct input_dev *dev) 750 { 751 struct input_handle *handle; 752 753 /* 754 * Mark device as going away. Note that we take dev->mutex here 755 * not to protect access to dev->going_away but rather to ensure 756 * that there are no threads in the middle of input_open_device() 757 */ 758 mutex_lock(&dev->mutex); 759 dev->going_away = true; 760 mutex_unlock(&dev->mutex); 761 762 spin_lock_irq(&dev->event_lock); 763 764 /* 765 * Simulate keyup events for all pressed keys so that handlers 766 * are not left with "stuck" keys. The driver may continue 767 * generate events even after we done here but they will not 768 * reach any handlers. 769 */ 770 if (input_dev_release_keys(dev)) 771 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 772 773 list_for_each_entry(handle, &dev->h_list, d_node) 774 handle->open = 0; 775 776 spin_unlock_irq(&dev->event_lock); 777 } 778 779 /** 780 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry 781 * @ke: keymap entry containing scancode to be converted. 782 * @scancode: pointer to the location where converted scancode should 783 * be stored. 784 * 785 * This function is used to convert scancode stored in &struct keymap_entry 786 * into scalar form understood by legacy keymap handling methods. These 787 * methods expect scancodes to be represented as 'unsigned int'. 788 */ 789 int input_scancode_to_scalar(const struct input_keymap_entry *ke, 790 unsigned int *scancode) 791 { 792 switch (ke->len) { 793 case 1: 794 *scancode = *((u8 *)ke->scancode); 795 break; 796 797 case 2: 798 *scancode = *((u16 *)ke->scancode); 799 break; 800 801 case 4: 802 *scancode = *((u32 *)ke->scancode); 803 break; 804 805 default: 806 return -EINVAL; 807 } 808 809 return 0; 810 } 811 EXPORT_SYMBOL(input_scancode_to_scalar); 812 813 /* 814 * Those routines handle the default case where no [gs]etkeycode() is 815 * defined. In this case, an array indexed by the scancode is used. 816 */ 817 818 static unsigned int input_fetch_keycode(struct input_dev *dev, 819 unsigned int index) 820 { 821 switch (dev->keycodesize) { 822 case 1: 823 return ((u8 *)dev->keycode)[index]; 824 825 case 2: 826 return ((u16 *)dev->keycode)[index]; 827 828 default: 829 return ((u32 *)dev->keycode)[index]; 830 } 831 } 832 833 static int input_default_getkeycode(struct input_dev *dev, 834 struct input_keymap_entry *ke) 835 { 836 unsigned int index; 837 int error; 838 839 if (!dev->keycodesize) 840 return -EINVAL; 841 842 if (ke->flags & INPUT_KEYMAP_BY_INDEX) 843 index = ke->index; 844 else { 845 error = input_scancode_to_scalar(ke, &index); 846 if (error) 847 return error; 848 } 849 850 if (index >= dev->keycodemax) 851 return -EINVAL; 852 853 ke->keycode = input_fetch_keycode(dev, index); 854 ke->index = index; 855 ke->len = sizeof(index); 856 memcpy(ke->scancode, &index, sizeof(index)); 857 858 return 0; 859 } 860 861 static int input_default_setkeycode(struct input_dev *dev, 862 const struct input_keymap_entry *ke, 863 unsigned int *old_keycode) 864 { 865 unsigned int index; 866 int error; 867 int i; 868 869 if (!dev->keycodesize) 870 return -EINVAL; 871 872 if (ke->flags & INPUT_KEYMAP_BY_INDEX) { 873 index = ke->index; 874 } else { 875 error = input_scancode_to_scalar(ke, &index); 876 if (error) 877 return error; 878 } 879 880 if (index >= dev->keycodemax) 881 return -EINVAL; 882 883 if (dev->keycodesize < sizeof(ke->keycode) && 884 (ke->keycode >> (dev->keycodesize * 8))) 885 return -EINVAL; 886 887 switch (dev->keycodesize) { 888 case 1: { 889 u8 *k = (u8 *)dev->keycode; 890 *old_keycode = k[index]; 891 k[index] = ke->keycode; 892 break; 893 } 894 case 2: { 895 u16 *k = (u16 *)dev->keycode; 896 *old_keycode = k[index]; 897 k[index] = ke->keycode; 898 break; 899 } 900 default: { 901 u32 *k = (u32 *)dev->keycode; 902 *old_keycode = k[index]; 903 k[index] = ke->keycode; 904 break; 905 } 906 } 907 908 if (*old_keycode <= KEY_MAX) { 909 __clear_bit(*old_keycode, dev->keybit); 910 for (i = 0; i < dev->keycodemax; i++) { 911 if (input_fetch_keycode(dev, i) == *old_keycode) { 912 __set_bit(*old_keycode, dev->keybit); 913 /* Setting the bit twice is useless, so break */ 914 break; 915 } 916 } 917 } 918 919 __set_bit(ke->keycode, dev->keybit); 920 return 0; 921 } 922 923 /** 924 * input_get_keycode - retrieve keycode currently mapped to a given scancode 925 * @dev: input device which keymap is being queried 926 * @ke: keymap entry 927 * 928 * This function should be called by anyone interested in retrieving current 929 * keymap. Presently evdev handlers use it. 930 */ 931 int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke) 932 { 933 unsigned long flags; 934 int retval; 935 936 spin_lock_irqsave(&dev->event_lock, flags); 937 retval = dev->getkeycode(dev, ke); 938 spin_unlock_irqrestore(&dev->event_lock, flags); 939 940 return retval; 941 } 942 EXPORT_SYMBOL(input_get_keycode); 943 944 /** 945 * input_set_keycode - attribute a keycode to a given scancode 946 * @dev: input device which keymap is being updated 947 * @ke: new keymap entry 948 * 949 * This function should be called by anyone needing to update current 950 * keymap. Presently keyboard and evdev handlers use it. 951 */ 952 int input_set_keycode(struct input_dev *dev, 953 const struct input_keymap_entry *ke) 954 { 955 unsigned long flags; 956 unsigned int old_keycode; 957 int retval; 958 959 if (ke->keycode > KEY_MAX) 960 return -EINVAL; 961 962 spin_lock_irqsave(&dev->event_lock, flags); 963 964 retval = dev->setkeycode(dev, ke, &old_keycode); 965 if (retval) 966 goto out; 967 968 /* Make sure KEY_RESERVED did not get enabled. */ 969 __clear_bit(KEY_RESERVED, dev->keybit); 970 971 /* 972 * Simulate keyup event if keycode is not present 973 * in the keymap anymore 974 */ 975 if (old_keycode > KEY_MAX) { 976 dev_warn(dev->dev.parent ?: &dev->dev, 977 "%s: got too big old keycode %#x\n", 978 __func__, old_keycode); 979 } else if (test_bit(EV_KEY, dev->evbit) && 980 !is_event_supported(old_keycode, dev->keybit, KEY_MAX) && 981 __test_and_clear_bit(old_keycode, dev->key)) { 982 /* 983 * We have to use input_event_dispose() here directly instead 984 * of input_handle_event() because the key we want to release 985 * here is considered no longer supported by the device and 986 * input_handle_event() will ignore it. 987 */ 988 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS, 989 EV_KEY, old_keycode, 0); 990 input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH, 991 EV_SYN, SYN_REPORT, 1); 992 } 993 994 out: 995 spin_unlock_irqrestore(&dev->event_lock, flags); 996 997 return retval; 998 } 999 EXPORT_SYMBOL(input_set_keycode); 1000 1001 bool input_match_device_id(const struct input_dev *dev, 1002 const struct input_device_id *id) 1003 { 1004 if (id->flags & INPUT_DEVICE_ID_MATCH_BUS) 1005 if (id->bustype != dev->id.bustype) 1006 return false; 1007 1008 if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR) 1009 if (id->vendor != dev->id.vendor) 1010 return false; 1011 1012 if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT) 1013 if (id->product != dev->id.product) 1014 return false; 1015 1016 if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION) 1017 if (id->version != dev->id.version) 1018 return false; 1019 1020 if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) || 1021 !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) || 1022 !bitmap_subset(id->relbit, dev->relbit, REL_MAX) || 1023 !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) || 1024 !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) || 1025 !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) || 1026 !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) || 1027 !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) || 1028 !bitmap_subset(id->swbit, dev->swbit, SW_MAX) || 1029 !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) { 1030 return false; 1031 } 1032 1033 return true; 1034 } 1035 EXPORT_SYMBOL(input_match_device_id); 1036 1037 static const struct input_device_id *input_match_device(struct input_handler *handler, 1038 struct input_dev *dev) 1039 { 1040 const struct input_device_id *id; 1041 1042 for (id = handler->id_table; id->flags || id->driver_info; id++) { 1043 if (input_match_device_id(dev, id) && 1044 (!handler->match || handler->match(handler, dev))) { 1045 return id; 1046 } 1047 } 1048 1049 return NULL; 1050 } 1051 1052 static int input_attach_handler(struct input_dev *dev, struct input_handler *handler) 1053 { 1054 const struct input_device_id *id; 1055 int error; 1056 1057 id = input_match_device(handler, dev); 1058 if (!id) 1059 return -ENODEV; 1060 1061 error = handler->connect(handler, dev, id); 1062 if (error && error != -ENODEV) 1063 pr_err("failed to attach handler %s to device %s, error: %d\n", 1064 handler->name, kobject_name(&dev->dev.kobj), error); 1065 1066 return error; 1067 } 1068 1069 #ifdef CONFIG_COMPAT 1070 1071 static int input_bits_to_string(char *buf, int buf_size, 1072 unsigned long bits, bool skip_empty) 1073 { 1074 int len = 0; 1075 1076 if (in_compat_syscall()) { 1077 u32 dword = bits >> 32; 1078 if (dword || !skip_empty) 1079 len += snprintf(buf, buf_size, "%x ", dword); 1080 1081 dword = bits & 0xffffffffUL; 1082 if (dword || !skip_empty || len) 1083 len += snprintf(buf + len, max(buf_size - len, 0), 1084 "%x", dword); 1085 } else { 1086 if (bits || !skip_empty) 1087 len += snprintf(buf, buf_size, "%lx", bits); 1088 } 1089 1090 return len; 1091 } 1092 1093 #else /* !CONFIG_COMPAT */ 1094 1095 static int input_bits_to_string(char *buf, int buf_size, 1096 unsigned long bits, bool skip_empty) 1097 { 1098 return bits || !skip_empty ? 1099 snprintf(buf, buf_size, "%lx", bits) : 0; 1100 } 1101 1102 #endif 1103 1104 #ifdef CONFIG_PROC_FS 1105 1106 static struct proc_dir_entry *proc_bus_input_dir; 1107 static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait); 1108 static int input_devices_state; 1109 1110 static inline void input_wakeup_procfs_readers(void) 1111 { 1112 input_devices_state++; 1113 wake_up(&input_devices_poll_wait); 1114 } 1115 1116 static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait) 1117 { 1118 poll_wait(file, &input_devices_poll_wait, wait); 1119 if (file->f_version != input_devices_state) { 1120 file->f_version = input_devices_state; 1121 return EPOLLIN | EPOLLRDNORM; 1122 } 1123 1124 return 0; 1125 } 1126 1127 union input_seq_state { 1128 struct { 1129 unsigned short pos; 1130 bool mutex_acquired; 1131 }; 1132 void *p; 1133 }; 1134 1135 static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos) 1136 { 1137 union input_seq_state *state = (union input_seq_state *)&seq->private; 1138 int error; 1139 1140 /* We need to fit into seq->private pointer */ 1141 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1142 1143 error = mutex_lock_interruptible(&input_mutex); 1144 if (error) { 1145 state->mutex_acquired = false; 1146 return ERR_PTR(error); 1147 } 1148 1149 state->mutex_acquired = true; 1150 1151 return seq_list_start(&input_dev_list, *pos); 1152 } 1153 1154 static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1155 { 1156 return seq_list_next(v, &input_dev_list, pos); 1157 } 1158 1159 static void input_seq_stop(struct seq_file *seq, void *v) 1160 { 1161 union input_seq_state *state = (union input_seq_state *)&seq->private; 1162 1163 if (state->mutex_acquired) 1164 mutex_unlock(&input_mutex); 1165 } 1166 1167 static void input_seq_print_bitmap(struct seq_file *seq, const char *name, 1168 unsigned long *bitmap, int max) 1169 { 1170 int i; 1171 bool skip_empty = true; 1172 char buf[18]; 1173 1174 seq_printf(seq, "B: %s=", name); 1175 1176 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1177 if (input_bits_to_string(buf, sizeof(buf), 1178 bitmap[i], skip_empty)) { 1179 skip_empty = false; 1180 seq_printf(seq, "%s%s", buf, i > 0 ? " " : ""); 1181 } 1182 } 1183 1184 /* 1185 * If no output was produced print a single 0. 1186 */ 1187 if (skip_empty) 1188 seq_putc(seq, '0'); 1189 1190 seq_putc(seq, '\n'); 1191 } 1192 1193 static int input_devices_seq_show(struct seq_file *seq, void *v) 1194 { 1195 struct input_dev *dev = container_of(v, struct input_dev, node); 1196 const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); 1197 struct input_handle *handle; 1198 1199 seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n", 1200 dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version); 1201 1202 seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : ""); 1203 seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : ""); 1204 seq_printf(seq, "S: Sysfs=%s\n", path ? path : ""); 1205 seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : ""); 1206 seq_puts(seq, "H: Handlers="); 1207 1208 list_for_each_entry(handle, &dev->h_list, d_node) 1209 seq_printf(seq, "%s ", handle->name); 1210 seq_putc(seq, '\n'); 1211 1212 input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX); 1213 1214 input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX); 1215 if (test_bit(EV_KEY, dev->evbit)) 1216 input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX); 1217 if (test_bit(EV_REL, dev->evbit)) 1218 input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX); 1219 if (test_bit(EV_ABS, dev->evbit)) 1220 input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX); 1221 if (test_bit(EV_MSC, dev->evbit)) 1222 input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX); 1223 if (test_bit(EV_LED, dev->evbit)) 1224 input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX); 1225 if (test_bit(EV_SND, dev->evbit)) 1226 input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX); 1227 if (test_bit(EV_FF, dev->evbit)) 1228 input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX); 1229 if (test_bit(EV_SW, dev->evbit)) 1230 input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX); 1231 1232 seq_putc(seq, '\n'); 1233 1234 kfree(path); 1235 return 0; 1236 } 1237 1238 static const struct seq_operations input_devices_seq_ops = { 1239 .start = input_devices_seq_start, 1240 .next = input_devices_seq_next, 1241 .stop = input_seq_stop, 1242 .show = input_devices_seq_show, 1243 }; 1244 1245 static int input_proc_devices_open(struct inode *inode, struct file *file) 1246 { 1247 return seq_open(file, &input_devices_seq_ops); 1248 } 1249 1250 static const struct proc_ops input_devices_proc_ops = { 1251 .proc_open = input_proc_devices_open, 1252 .proc_poll = input_proc_devices_poll, 1253 .proc_read = seq_read, 1254 .proc_lseek = seq_lseek, 1255 .proc_release = seq_release, 1256 }; 1257 1258 static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos) 1259 { 1260 union input_seq_state *state = (union input_seq_state *)&seq->private; 1261 int error; 1262 1263 /* We need to fit into seq->private pointer */ 1264 BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private)); 1265 1266 error = mutex_lock_interruptible(&input_mutex); 1267 if (error) { 1268 state->mutex_acquired = false; 1269 return ERR_PTR(error); 1270 } 1271 1272 state->mutex_acquired = true; 1273 state->pos = *pos; 1274 1275 return seq_list_start(&input_handler_list, *pos); 1276 } 1277 1278 static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1279 { 1280 union input_seq_state *state = (union input_seq_state *)&seq->private; 1281 1282 state->pos = *pos + 1; 1283 return seq_list_next(v, &input_handler_list, pos); 1284 } 1285 1286 static int input_handlers_seq_show(struct seq_file *seq, void *v) 1287 { 1288 struct input_handler *handler = container_of(v, struct input_handler, node); 1289 union input_seq_state *state = (union input_seq_state *)&seq->private; 1290 1291 seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name); 1292 if (handler->filter) 1293 seq_puts(seq, " (filter)"); 1294 if (handler->legacy_minors) 1295 seq_printf(seq, " Minor=%d", handler->minor); 1296 seq_putc(seq, '\n'); 1297 1298 return 0; 1299 } 1300 1301 static const struct seq_operations input_handlers_seq_ops = { 1302 .start = input_handlers_seq_start, 1303 .next = input_handlers_seq_next, 1304 .stop = input_seq_stop, 1305 .show = input_handlers_seq_show, 1306 }; 1307 1308 static int input_proc_handlers_open(struct inode *inode, struct file *file) 1309 { 1310 return seq_open(file, &input_handlers_seq_ops); 1311 } 1312 1313 static const struct proc_ops input_handlers_proc_ops = { 1314 .proc_open = input_proc_handlers_open, 1315 .proc_read = seq_read, 1316 .proc_lseek = seq_lseek, 1317 .proc_release = seq_release, 1318 }; 1319 1320 static int __init input_proc_init(void) 1321 { 1322 struct proc_dir_entry *entry; 1323 1324 proc_bus_input_dir = proc_mkdir("bus/input", NULL); 1325 if (!proc_bus_input_dir) 1326 return -ENOMEM; 1327 1328 entry = proc_create("devices", 0, proc_bus_input_dir, 1329 &input_devices_proc_ops); 1330 if (!entry) 1331 goto fail1; 1332 1333 entry = proc_create("handlers", 0, proc_bus_input_dir, 1334 &input_handlers_proc_ops); 1335 if (!entry) 1336 goto fail2; 1337 1338 return 0; 1339 1340 fail2: remove_proc_entry("devices", proc_bus_input_dir); 1341 fail1: remove_proc_entry("bus/input", NULL); 1342 return -ENOMEM; 1343 } 1344 1345 static void input_proc_exit(void) 1346 { 1347 remove_proc_entry("devices", proc_bus_input_dir); 1348 remove_proc_entry("handlers", proc_bus_input_dir); 1349 remove_proc_entry("bus/input", NULL); 1350 } 1351 1352 #else /* !CONFIG_PROC_FS */ 1353 static inline void input_wakeup_procfs_readers(void) { } 1354 static inline int input_proc_init(void) { return 0; } 1355 static inline void input_proc_exit(void) { } 1356 #endif 1357 1358 #define INPUT_DEV_STRING_ATTR_SHOW(name) \ 1359 static ssize_t input_dev_show_##name(struct device *dev, \ 1360 struct device_attribute *attr, \ 1361 char *buf) \ 1362 { \ 1363 struct input_dev *input_dev = to_input_dev(dev); \ 1364 \ 1365 return scnprintf(buf, PAGE_SIZE, "%s\n", \ 1366 input_dev->name ? input_dev->name : ""); \ 1367 } \ 1368 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL) 1369 1370 INPUT_DEV_STRING_ATTR_SHOW(name); 1371 INPUT_DEV_STRING_ATTR_SHOW(phys); 1372 INPUT_DEV_STRING_ATTR_SHOW(uniq); 1373 1374 static int input_print_modalias_bits(char *buf, int size, 1375 char name, unsigned long *bm, 1376 unsigned int min_bit, unsigned int max_bit) 1377 { 1378 int len = 0, i; 1379 1380 len += snprintf(buf, max(size, 0), "%c", name); 1381 for (i = min_bit; i < max_bit; i++) 1382 if (bm[BIT_WORD(i)] & BIT_MASK(i)) 1383 len += snprintf(buf + len, max(size - len, 0), "%X,", i); 1384 return len; 1385 } 1386 1387 static int input_print_modalias(char *buf, int size, struct input_dev *id, 1388 int add_cr) 1389 { 1390 int len; 1391 1392 len = snprintf(buf, max(size, 0), 1393 "input:b%04Xv%04Xp%04Xe%04X-", 1394 id->id.bustype, id->id.vendor, 1395 id->id.product, id->id.version); 1396 1397 len += input_print_modalias_bits(buf + len, size - len, 1398 'e', id->evbit, 0, EV_MAX); 1399 len += input_print_modalias_bits(buf + len, size - len, 1400 'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX); 1401 len += input_print_modalias_bits(buf + len, size - len, 1402 'r', id->relbit, 0, REL_MAX); 1403 len += input_print_modalias_bits(buf + len, size - len, 1404 'a', id->absbit, 0, ABS_MAX); 1405 len += input_print_modalias_bits(buf + len, size - len, 1406 'm', id->mscbit, 0, MSC_MAX); 1407 len += input_print_modalias_bits(buf + len, size - len, 1408 'l', id->ledbit, 0, LED_MAX); 1409 len += input_print_modalias_bits(buf + len, size - len, 1410 's', id->sndbit, 0, SND_MAX); 1411 len += input_print_modalias_bits(buf + len, size - len, 1412 'f', id->ffbit, 0, FF_MAX); 1413 len += input_print_modalias_bits(buf + len, size - len, 1414 'w', id->swbit, 0, SW_MAX); 1415 1416 if (add_cr) 1417 len += snprintf(buf + len, max(size - len, 0), "\n"); 1418 1419 return len; 1420 } 1421 1422 static ssize_t input_dev_show_modalias(struct device *dev, 1423 struct device_attribute *attr, 1424 char *buf) 1425 { 1426 struct input_dev *id = to_input_dev(dev); 1427 ssize_t len; 1428 1429 len = input_print_modalias(buf, PAGE_SIZE, id, 1); 1430 1431 return min_t(int, len, PAGE_SIZE); 1432 } 1433 static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL); 1434 1435 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1436 int max, int add_cr); 1437 1438 static ssize_t input_dev_show_properties(struct device *dev, 1439 struct device_attribute *attr, 1440 char *buf) 1441 { 1442 struct input_dev *input_dev = to_input_dev(dev); 1443 int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit, 1444 INPUT_PROP_MAX, true); 1445 return min_t(int, len, PAGE_SIZE); 1446 } 1447 static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL); 1448 1449 static int input_inhibit_device(struct input_dev *dev); 1450 static int input_uninhibit_device(struct input_dev *dev); 1451 1452 static ssize_t inhibited_show(struct device *dev, 1453 struct device_attribute *attr, 1454 char *buf) 1455 { 1456 struct input_dev *input_dev = to_input_dev(dev); 1457 1458 return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited); 1459 } 1460 1461 static ssize_t inhibited_store(struct device *dev, 1462 struct device_attribute *attr, const char *buf, 1463 size_t len) 1464 { 1465 struct input_dev *input_dev = to_input_dev(dev); 1466 ssize_t rv; 1467 bool inhibited; 1468 1469 if (kstrtobool(buf, &inhibited)) 1470 return -EINVAL; 1471 1472 if (inhibited) 1473 rv = input_inhibit_device(input_dev); 1474 else 1475 rv = input_uninhibit_device(input_dev); 1476 1477 if (rv != 0) 1478 return rv; 1479 1480 return len; 1481 } 1482 1483 static DEVICE_ATTR_RW(inhibited); 1484 1485 static struct attribute *input_dev_attrs[] = { 1486 &dev_attr_name.attr, 1487 &dev_attr_phys.attr, 1488 &dev_attr_uniq.attr, 1489 &dev_attr_modalias.attr, 1490 &dev_attr_properties.attr, 1491 &dev_attr_inhibited.attr, 1492 NULL 1493 }; 1494 1495 static const struct attribute_group input_dev_attr_group = { 1496 .attrs = input_dev_attrs, 1497 }; 1498 1499 #define INPUT_DEV_ID_ATTR(name) \ 1500 static ssize_t input_dev_show_id_##name(struct device *dev, \ 1501 struct device_attribute *attr, \ 1502 char *buf) \ 1503 { \ 1504 struct input_dev *input_dev = to_input_dev(dev); \ 1505 return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name); \ 1506 } \ 1507 static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL) 1508 1509 INPUT_DEV_ID_ATTR(bustype); 1510 INPUT_DEV_ID_ATTR(vendor); 1511 INPUT_DEV_ID_ATTR(product); 1512 INPUT_DEV_ID_ATTR(version); 1513 1514 static struct attribute *input_dev_id_attrs[] = { 1515 &dev_attr_bustype.attr, 1516 &dev_attr_vendor.attr, 1517 &dev_attr_product.attr, 1518 &dev_attr_version.attr, 1519 NULL 1520 }; 1521 1522 static const struct attribute_group input_dev_id_attr_group = { 1523 .name = "id", 1524 .attrs = input_dev_id_attrs, 1525 }; 1526 1527 static int input_print_bitmap(char *buf, int buf_size, unsigned long *bitmap, 1528 int max, int add_cr) 1529 { 1530 int i; 1531 int len = 0; 1532 bool skip_empty = true; 1533 1534 for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) { 1535 len += input_bits_to_string(buf + len, max(buf_size - len, 0), 1536 bitmap[i], skip_empty); 1537 if (len) { 1538 skip_empty = false; 1539 if (i > 0) 1540 len += snprintf(buf + len, max(buf_size - len, 0), " "); 1541 } 1542 } 1543 1544 /* 1545 * If no output was produced print a single 0. 1546 */ 1547 if (len == 0) 1548 len = snprintf(buf, buf_size, "%d", 0); 1549 1550 if (add_cr) 1551 len += snprintf(buf + len, max(buf_size - len, 0), "\n"); 1552 1553 return len; 1554 } 1555 1556 #define INPUT_DEV_CAP_ATTR(ev, bm) \ 1557 static ssize_t input_dev_show_cap_##bm(struct device *dev, \ 1558 struct device_attribute *attr, \ 1559 char *buf) \ 1560 { \ 1561 struct input_dev *input_dev = to_input_dev(dev); \ 1562 int len = input_print_bitmap(buf, PAGE_SIZE, \ 1563 input_dev->bm##bit, ev##_MAX, \ 1564 true); \ 1565 return min_t(int, len, PAGE_SIZE); \ 1566 } \ 1567 static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL) 1568 1569 INPUT_DEV_CAP_ATTR(EV, ev); 1570 INPUT_DEV_CAP_ATTR(KEY, key); 1571 INPUT_DEV_CAP_ATTR(REL, rel); 1572 INPUT_DEV_CAP_ATTR(ABS, abs); 1573 INPUT_DEV_CAP_ATTR(MSC, msc); 1574 INPUT_DEV_CAP_ATTR(LED, led); 1575 INPUT_DEV_CAP_ATTR(SND, snd); 1576 INPUT_DEV_CAP_ATTR(FF, ff); 1577 INPUT_DEV_CAP_ATTR(SW, sw); 1578 1579 static struct attribute *input_dev_caps_attrs[] = { 1580 &dev_attr_ev.attr, 1581 &dev_attr_key.attr, 1582 &dev_attr_rel.attr, 1583 &dev_attr_abs.attr, 1584 &dev_attr_msc.attr, 1585 &dev_attr_led.attr, 1586 &dev_attr_snd.attr, 1587 &dev_attr_ff.attr, 1588 &dev_attr_sw.attr, 1589 NULL 1590 }; 1591 1592 static const struct attribute_group input_dev_caps_attr_group = { 1593 .name = "capabilities", 1594 .attrs = input_dev_caps_attrs, 1595 }; 1596 1597 static const struct attribute_group *input_dev_attr_groups[] = { 1598 &input_dev_attr_group, 1599 &input_dev_id_attr_group, 1600 &input_dev_caps_attr_group, 1601 &input_poller_attribute_group, 1602 NULL 1603 }; 1604 1605 static void input_dev_release(struct device *device) 1606 { 1607 struct input_dev *dev = to_input_dev(device); 1608 1609 input_ff_destroy(dev); 1610 input_mt_destroy_slots(dev); 1611 kfree(dev->poller); 1612 kfree(dev->absinfo); 1613 kfree(dev->vals); 1614 kfree(dev); 1615 1616 module_put(THIS_MODULE); 1617 } 1618 1619 /* 1620 * Input uevent interface - loading event handlers based on 1621 * device bitfields. 1622 */ 1623 static int input_add_uevent_bm_var(struct kobj_uevent_env *env, 1624 const char *name, unsigned long *bitmap, int max) 1625 { 1626 int len; 1627 1628 if (add_uevent_var(env, "%s", name)) 1629 return -ENOMEM; 1630 1631 len = input_print_bitmap(&env->buf[env->buflen - 1], 1632 sizeof(env->buf) - env->buflen, 1633 bitmap, max, false); 1634 if (len >= (sizeof(env->buf) - env->buflen)) 1635 return -ENOMEM; 1636 1637 env->buflen += len; 1638 return 0; 1639 } 1640 1641 static int input_add_uevent_modalias_var(struct kobj_uevent_env *env, 1642 struct input_dev *dev) 1643 { 1644 int len; 1645 1646 if (add_uevent_var(env, "MODALIAS=")) 1647 return -ENOMEM; 1648 1649 len = input_print_modalias(&env->buf[env->buflen - 1], 1650 sizeof(env->buf) - env->buflen, 1651 dev, 0); 1652 if (len >= (sizeof(env->buf) - env->buflen)) 1653 return -ENOMEM; 1654 1655 env->buflen += len; 1656 return 0; 1657 } 1658 1659 #define INPUT_ADD_HOTPLUG_VAR(fmt, val...) \ 1660 do { \ 1661 int err = add_uevent_var(env, fmt, val); \ 1662 if (err) \ 1663 return err; \ 1664 } while (0) 1665 1666 #define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max) \ 1667 do { \ 1668 int err = input_add_uevent_bm_var(env, name, bm, max); \ 1669 if (err) \ 1670 return err; \ 1671 } while (0) 1672 1673 #define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev) \ 1674 do { \ 1675 int err = input_add_uevent_modalias_var(env, dev); \ 1676 if (err) \ 1677 return err; \ 1678 } while (0) 1679 1680 static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env) 1681 { 1682 struct input_dev *dev = to_input_dev(device); 1683 1684 INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x", 1685 dev->id.bustype, dev->id.vendor, 1686 dev->id.product, dev->id.version); 1687 if (dev->name) 1688 INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name); 1689 if (dev->phys) 1690 INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys); 1691 if (dev->uniq) 1692 INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq); 1693 1694 INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX); 1695 1696 INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX); 1697 if (test_bit(EV_KEY, dev->evbit)) 1698 INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX); 1699 if (test_bit(EV_REL, dev->evbit)) 1700 INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX); 1701 if (test_bit(EV_ABS, dev->evbit)) 1702 INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX); 1703 if (test_bit(EV_MSC, dev->evbit)) 1704 INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX); 1705 if (test_bit(EV_LED, dev->evbit)) 1706 INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX); 1707 if (test_bit(EV_SND, dev->evbit)) 1708 INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX); 1709 if (test_bit(EV_FF, dev->evbit)) 1710 INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX); 1711 if (test_bit(EV_SW, dev->evbit)) 1712 INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX); 1713 1714 INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev); 1715 1716 return 0; 1717 } 1718 1719 #define INPUT_DO_TOGGLE(dev, type, bits, on) \ 1720 do { \ 1721 int i; \ 1722 bool active; \ 1723 \ 1724 if (!test_bit(EV_##type, dev->evbit)) \ 1725 break; \ 1726 \ 1727 for_each_set_bit(i, dev->bits##bit, type##_CNT) { \ 1728 active = test_bit(i, dev->bits); \ 1729 if (!active && !on) \ 1730 continue; \ 1731 \ 1732 dev->event(dev, EV_##type, i, on ? active : 0); \ 1733 } \ 1734 } while (0) 1735 1736 static void input_dev_toggle(struct input_dev *dev, bool activate) 1737 { 1738 if (!dev->event) 1739 return; 1740 1741 INPUT_DO_TOGGLE(dev, LED, led, activate); 1742 INPUT_DO_TOGGLE(dev, SND, snd, activate); 1743 1744 if (activate && test_bit(EV_REP, dev->evbit)) { 1745 dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]); 1746 dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]); 1747 } 1748 } 1749 1750 /** 1751 * input_reset_device() - reset/restore the state of input device 1752 * @dev: input device whose state needs to be reset 1753 * 1754 * This function tries to reset the state of an opened input device and 1755 * bring internal state and state if the hardware in sync with each other. 1756 * We mark all keys as released, restore LED state, repeat rate, etc. 1757 */ 1758 void input_reset_device(struct input_dev *dev) 1759 { 1760 unsigned long flags; 1761 1762 mutex_lock(&dev->mutex); 1763 spin_lock_irqsave(&dev->event_lock, flags); 1764 1765 input_dev_toggle(dev, true); 1766 if (input_dev_release_keys(dev)) 1767 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 1768 1769 spin_unlock_irqrestore(&dev->event_lock, flags); 1770 mutex_unlock(&dev->mutex); 1771 } 1772 EXPORT_SYMBOL(input_reset_device); 1773 1774 static int input_inhibit_device(struct input_dev *dev) 1775 { 1776 mutex_lock(&dev->mutex); 1777 1778 if (dev->inhibited) 1779 goto out; 1780 1781 if (dev->users) { 1782 if (dev->close) 1783 dev->close(dev); 1784 if (dev->poller) 1785 input_dev_poller_stop(dev->poller); 1786 } 1787 1788 spin_lock_irq(&dev->event_lock); 1789 input_mt_release_slots(dev); 1790 input_dev_release_keys(dev); 1791 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 1792 input_dev_toggle(dev, false); 1793 spin_unlock_irq(&dev->event_lock); 1794 1795 dev->inhibited = true; 1796 1797 out: 1798 mutex_unlock(&dev->mutex); 1799 return 0; 1800 } 1801 1802 static int input_uninhibit_device(struct input_dev *dev) 1803 { 1804 int ret = 0; 1805 1806 mutex_lock(&dev->mutex); 1807 1808 if (!dev->inhibited) 1809 goto out; 1810 1811 if (dev->users) { 1812 if (dev->open) { 1813 ret = dev->open(dev); 1814 if (ret) 1815 goto out; 1816 } 1817 if (dev->poller) 1818 input_dev_poller_start(dev->poller); 1819 } 1820 1821 dev->inhibited = false; 1822 spin_lock_irq(&dev->event_lock); 1823 input_dev_toggle(dev, true); 1824 spin_unlock_irq(&dev->event_lock); 1825 1826 out: 1827 mutex_unlock(&dev->mutex); 1828 return ret; 1829 } 1830 1831 #ifdef CONFIG_PM_SLEEP 1832 static int input_dev_suspend(struct device *dev) 1833 { 1834 struct input_dev *input_dev = to_input_dev(dev); 1835 1836 spin_lock_irq(&input_dev->event_lock); 1837 1838 /* 1839 * Keys that are pressed now are unlikely to be 1840 * still pressed when we resume. 1841 */ 1842 if (input_dev_release_keys(input_dev)) 1843 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1); 1844 1845 /* Turn off LEDs and sounds, if any are active. */ 1846 input_dev_toggle(input_dev, false); 1847 1848 spin_unlock_irq(&input_dev->event_lock); 1849 1850 return 0; 1851 } 1852 1853 static int input_dev_resume(struct device *dev) 1854 { 1855 struct input_dev *input_dev = to_input_dev(dev); 1856 1857 spin_lock_irq(&input_dev->event_lock); 1858 1859 /* Restore state of LEDs and sounds, if any were active. */ 1860 input_dev_toggle(input_dev, true); 1861 1862 spin_unlock_irq(&input_dev->event_lock); 1863 1864 return 0; 1865 } 1866 1867 static int input_dev_freeze(struct device *dev) 1868 { 1869 struct input_dev *input_dev = to_input_dev(dev); 1870 1871 spin_lock_irq(&input_dev->event_lock); 1872 1873 /* 1874 * Keys that are pressed now are unlikely to be 1875 * still pressed when we resume. 1876 */ 1877 if (input_dev_release_keys(input_dev)) 1878 input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1); 1879 1880 spin_unlock_irq(&input_dev->event_lock); 1881 1882 return 0; 1883 } 1884 1885 static int input_dev_poweroff(struct device *dev) 1886 { 1887 struct input_dev *input_dev = to_input_dev(dev); 1888 1889 spin_lock_irq(&input_dev->event_lock); 1890 1891 /* Turn off LEDs and sounds, if any are active. */ 1892 input_dev_toggle(input_dev, false); 1893 1894 spin_unlock_irq(&input_dev->event_lock); 1895 1896 return 0; 1897 } 1898 1899 static const struct dev_pm_ops input_dev_pm_ops = { 1900 .suspend = input_dev_suspend, 1901 .resume = input_dev_resume, 1902 .freeze = input_dev_freeze, 1903 .poweroff = input_dev_poweroff, 1904 .restore = input_dev_resume, 1905 }; 1906 #endif /* CONFIG_PM */ 1907 1908 static const struct device_type input_dev_type = { 1909 .groups = input_dev_attr_groups, 1910 .release = input_dev_release, 1911 .uevent = input_dev_uevent, 1912 #ifdef CONFIG_PM_SLEEP 1913 .pm = &input_dev_pm_ops, 1914 #endif 1915 }; 1916 1917 static char *input_devnode(const struct device *dev, umode_t *mode) 1918 { 1919 return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev)); 1920 } 1921 1922 struct class input_class = { 1923 .name = "input", 1924 .devnode = input_devnode, 1925 }; 1926 EXPORT_SYMBOL_GPL(input_class); 1927 1928 /** 1929 * input_allocate_device - allocate memory for new input device 1930 * 1931 * Returns prepared struct input_dev or %NULL. 1932 * 1933 * NOTE: Use input_free_device() to free devices that have not been 1934 * registered; input_unregister_device() should be used for already 1935 * registered devices. 1936 */ 1937 struct input_dev *input_allocate_device(void) 1938 { 1939 static atomic_t input_no = ATOMIC_INIT(-1); 1940 struct input_dev *dev; 1941 1942 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 1943 if (dev) { 1944 dev->dev.type = &input_dev_type; 1945 dev->dev.class = &input_class; 1946 device_initialize(&dev->dev); 1947 mutex_init(&dev->mutex); 1948 spin_lock_init(&dev->event_lock); 1949 timer_setup(&dev->timer, NULL, 0); 1950 INIT_LIST_HEAD(&dev->h_list); 1951 INIT_LIST_HEAD(&dev->node); 1952 1953 dev_set_name(&dev->dev, "input%lu", 1954 (unsigned long)atomic_inc_return(&input_no)); 1955 1956 __module_get(THIS_MODULE); 1957 } 1958 1959 return dev; 1960 } 1961 EXPORT_SYMBOL(input_allocate_device); 1962 1963 struct input_devres { 1964 struct input_dev *input; 1965 }; 1966 1967 static int devm_input_device_match(struct device *dev, void *res, void *data) 1968 { 1969 struct input_devres *devres = res; 1970 1971 return devres->input == data; 1972 } 1973 1974 static void devm_input_device_release(struct device *dev, void *res) 1975 { 1976 struct input_devres *devres = res; 1977 struct input_dev *input = devres->input; 1978 1979 dev_dbg(dev, "%s: dropping reference to %s\n", 1980 __func__, dev_name(&input->dev)); 1981 input_put_device(input); 1982 } 1983 1984 /** 1985 * devm_input_allocate_device - allocate managed input device 1986 * @dev: device owning the input device being created 1987 * 1988 * Returns prepared struct input_dev or %NULL. 1989 * 1990 * Managed input devices do not need to be explicitly unregistered or 1991 * freed as it will be done automatically when owner device unbinds from 1992 * its driver (or binding fails). Once managed input device is allocated, 1993 * it is ready to be set up and registered in the same fashion as regular 1994 * input device. There are no special devm_input_device_[un]register() 1995 * variants, regular ones work with both managed and unmanaged devices, 1996 * should you need them. In most cases however, managed input device need 1997 * not be explicitly unregistered or freed. 1998 * 1999 * NOTE: the owner device is set up as parent of input device and users 2000 * should not override it. 2001 */ 2002 struct input_dev *devm_input_allocate_device(struct device *dev) 2003 { 2004 struct input_dev *input; 2005 struct input_devres *devres; 2006 2007 devres = devres_alloc(devm_input_device_release, 2008 sizeof(*devres), GFP_KERNEL); 2009 if (!devres) 2010 return NULL; 2011 2012 input = input_allocate_device(); 2013 if (!input) { 2014 devres_free(devres); 2015 return NULL; 2016 } 2017 2018 input->dev.parent = dev; 2019 input->devres_managed = true; 2020 2021 devres->input = input; 2022 devres_add(dev, devres); 2023 2024 return input; 2025 } 2026 EXPORT_SYMBOL(devm_input_allocate_device); 2027 2028 /** 2029 * input_free_device - free memory occupied by input_dev structure 2030 * @dev: input device to free 2031 * 2032 * This function should only be used if input_register_device() 2033 * was not called yet or if it failed. Once device was registered 2034 * use input_unregister_device() and memory will be freed once last 2035 * reference to the device is dropped. 2036 * 2037 * Device should be allocated by input_allocate_device(). 2038 * 2039 * NOTE: If there are references to the input device then memory 2040 * will not be freed until last reference is dropped. 2041 */ 2042 void input_free_device(struct input_dev *dev) 2043 { 2044 if (dev) { 2045 if (dev->devres_managed) 2046 WARN_ON(devres_destroy(dev->dev.parent, 2047 devm_input_device_release, 2048 devm_input_device_match, 2049 dev)); 2050 input_put_device(dev); 2051 } 2052 } 2053 EXPORT_SYMBOL(input_free_device); 2054 2055 /** 2056 * input_set_timestamp - set timestamp for input events 2057 * @dev: input device to set timestamp for 2058 * @timestamp: the time at which the event has occurred 2059 * in CLOCK_MONOTONIC 2060 * 2061 * This function is intended to provide to the input system a more 2062 * accurate time of when an event actually occurred. The driver should 2063 * call this function as soon as a timestamp is acquired ensuring 2064 * clock conversions in input_set_timestamp are done correctly. 2065 * 2066 * The system entering suspend state between timestamp acquisition and 2067 * calling input_set_timestamp can result in inaccurate conversions. 2068 */ 2069 void input_set_timestamp(struct input_dev *dev, ktime_t timestamp) 2070 { 2071 dev->timestamp[INPUT_CLK_MONO] = timestamp; 2072 dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp); 2073 dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp, 2074 TK_OFFS_BOOT); 2075 } 2076 EXPORT_SYMBOL(input_set_timestamp); 2077 2078 /** 2079 * input_get_timestamp - get timestamp for input events 2080 * @dev: input device to get timestamp from 2081 * 2082 * A valid timestamp is a timestamp of non-zero value. 2083 */ 2084 ktime_t *input_get_timestamp(struct input_dev *dev) 2085 { 2086 const ktime_t invalid_timestamp = ktime_set(0, 0); 2087 2088 if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp)) 2089 input_set_timestamp(dev, ktime_get()); 2090 2091 return dev->timestamp; 2092 } 2093 EXPORT_SYMBOL(input_get_timestamp); 2094 2095 /** 2096 * input_set_capability - mark device as capable of a certain event 2097 * @dev: device that is capable of emitting or accepting event 2098 * @type: type of the event (EV_KEY, EV_REL, etc...) 2099 * @code: event code 2100 * 2101 * In addition to setting up corresponding bit in appropriate capability 2102 * bitmap the function also adjusts dev->evbit. 2103 */ 2104 void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code) 2105 { 2106 if (type < EV_CNT && input_max_code[type] && 2107 code > input_max_code[type]) { 2108 pr_err("%s: invalid code %u for type %u\n", __func__, code, 2109 type); 2110 dump_stack(); 2111 return; 2112 } 2113 2114 switch (type) { 2115 case EV_KEY: 2116 __set_bit(code, dev->keybit); 2117 break; 2118 2119 case EV_REL: 2120 __set_bit(code, dev->relbit); 2121 break; 2122 2123 case EV_ABS: 2124 input_alloc_absinfo(dev); 2125 __set_bit(code, dev->absbit); 2126 break; 2127 2128 case EV_MSC: 2129 __set_bit(code, dev->mscbit); 2130 break; 2131 2132 case EV_SW: 2133 __set_bit(code, dev->swbit); 2134 break; 2135 2136 case EV_LED: 2137 __set_bit(code, dev->ledbit); 2138 break; 2139 2140 case EV_SND: 2141 __set_bit(code, dev->sndbit); 2142 break; 2143 2144 case EV_FF: 2145 __set_bit(code, dev->ffbit); 2146 break; 2147 2148 case EV_PWR: 2149 /* do nothing */ 2150 break; 2151 2152 default: 2153 pr_err("%s: unknown type %u (code %u)\n", __func__, type, code); 2154 dump_stack(); 2155 return; 2156 } 2157 2158 __set_bit(type, dev->evbit); 2159 } 2160 EXPORT_SYMBOL(input_set_capability); 2161 2162 static unsigned int input_estimate_events_per_packet(struct input_dev *dev) 2163 { 2164 int mt_slots; 2165 int i; 2166 unsigned int events; 2167 2168 if (dev->mt) { 2169 mt_slots = dev->mt->num_slots; 2170 } else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) { 2171 mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum - 2172 dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1, 2173 mt_slots = clamp(mt_slots, 2, 32); 2174 } else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) { 2175 mt_slots = 2; 2176 } else { 2177 mt_slots = 0; 2178 } 2179 2180 events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */ 2181 2182 if (test_bit(EV_ABS, dev->evbit)) 2183 for_each_set_bit(i, dev->absbit, ABS_CNT) 2184 events += input_is_mt_axis(i) ? mt_slots : 1; 2185 2186 if (test_bit(EV_REL, dev->evbit)) 2187 events += bitmap_weight(dev->relbit, REL_CNT); 2188 2189 /* Make room for KEY and MSC events */ 2190 events += 7; 2191 2192 return events; 2193 } 2194 2195 #define INPUT_CLEANSE_BITMASK(dev, type, bits) \ 2196 do { \ 2197 if (!test_bit(EV_##type, dev->evbit)) \ 2198 memset(dev->bits##bit, 0, \ 2199 sizeof(dev->bits##bit)); \ 2200 } while (0) 2201 2202 static void input_cleanse_bitmasks(struct input_dev *dev) 2203 { 2204 INPUT_CLEANSE_BITMASK(dev, KEY, key); 2205 INPUT_CLEANSE_BITMASK(dev, REL, rel); 2206 INPUT_CLEANSE_BITMASK(dev, ABS, abs); 2207 INPUT_CLEANSE_BITMASK(dev, MSC, msc); 2208 INPUT_CLEANSE_BITMASK(dev, LED, led); 2209 INPUT_CLEANSE_BITMASK(dev, SND, snd); 2210 INPUT_CLEANSE_BITMASK(dev, FF, ff); 2211 INPUT_CLEANSE_BITMASK(dev, SW, sw); 2212 } 2213 2214 static void __input_unregister_device(struct input_dev *dev) 2215 { 2216 struct input_handle *handle, *next; 2217 2218 input_disconnect_device(dev); 2219 2220 mutex_lock(&input_mutex); 2221 2222 list_for_each_entry_safe(handle, next, &dev->h_list, d_node) 2223 handle->handler->disconnect(handle); 2224 WARN_ON(!list_empty(&dev->h_list)); 2225 2226 del_timer_sync(&dev->timer); 2227 list_del_init(&dev->node); 2228 2229 input_wakeup_procfs_readers(); 2230 2231 mutex_unlock(&input_mutex); 2232 2233 device_del(&dev->dev); 2234 } 2235 2236 static void devm_input_device_unregister(struct device *dev, void *res) 2237 { 2238 struct input_devres *devres = res; 2239 struct input_dev *input = devres->input; 2240 2241 dev_dbg(dev, "%s: unregistering device %s\n", 2242 __func__, dev_name(&input->dev)); 2243 __input_unregister_device(input); 2244 } 2245 2246 /* 2247 * Generate software autorepeat event. Note that we take 2248 * dev->event_lock here to avoid racing with input_event 2249 * which may cause keys get "stuck". 2250 */ 2251 static void input_repeat_key(struct timer_list *t) 2252 { 2253 struct input_dev *dev = from_timer(dev, t, timer); 2254 unsigned long flags; 2255 2256 spin_lock_irqsave(&dev->event_lock, flags); 2257 2258 if (!dev->inhibited && 2259 test_bit(dev->repeat_key, dev->key) && 2260 is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) { 2261 2262 input_set_timestamp(dev, ktime_get()); 2263 input_handle_event(dev, EV_KEY, dev->repeat_key, 2); 2264 input_handle_event(dev, EV_SYN, SYN_REPORT, 1); 2265 2266 if (dev->rep[REP_PERIOD]) 2267 mod_timer(&dev->timer, jiffies + 2268 msecs_to_jiffies(dev->rep[REP_PERIOD])); 2269 } 2270 2271 spin_unlock_irqrestore(&dev->event_lock, flags); 2272 } 2273 2274 /** 2275 * input_enable_softrepeat - enable software autorepeat 2276 * @dev: input device 2277 * @delay: repeat delay 2278 * @period: repeat period 2279 * 2280 * Enable software autorepeat on the input device. 2281 */ 2282 void input_enable_softrepeat(struct input_dev *dev, int delay, int period) 2283 { 2284 dev->timer.function = input_repeat_key; 2285 dev->rep[REP_DELAY] = delay; 2286 dev->rep[REP_PERIOD] = period; 2287 } 2288 EXPORT_SYMBOL(input_enable_softrepeat); 2289 2290 bool input_device_enabled(struct input_dev *dev) 2291 { 2292 lockdep_assert_held(&dev->mutex); 2293 2294 return !dev->inhibited && dev->users > 0; 2295 } 2296 EXPORT_SYMBOL_GPL(input_device_enabled); 2297 2298 /** 2299 * input_register_device - register device with input core 2300 * @dev: device to be registered 2301 * 2302 * This function registers device with input core. The device must be 2303 * allocated with input_allocate_device() and all it's capabilities 2304 * set up before registering. 2305 * If function fails the device must be freed with input_free_device(). 2306 * Once device has been successfully registered it can be unregistered 2307 * with input_unregister_device(); input_free_device() should not be 2308 * called in this case. 2309 * 2310 * Note that this function is also used to register managed input devices 2311 * (ones allocated with devm_input_allocate_device()). Such managed input 2312 * devices need not be explicitly unregistered or freed, their tear down 2313 * is controlled by the devres infrastructure. It is also worth noting 2314 * that tear down of managed input devices is internally a 2-step process: 2315 * registered managed input device is first unregistered, but stays in 2316 * memory and can still handle input_event() calls (although events will 2317 * not be delivered anywhere). The freeing of managed input device will 2318 * happen later, when devres stack is unwound to the point where device 2319 * allocation was made. 2320 */ 2321 int input_register_device(struct input_dev *dev) 2322 { 2323 struct input_devres *devres = NULL; 2324 struct input_handler *handler; 2325 unsigned int packet_size; 2326 const char *path; 2327 int error; 2328 2329 if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) { 2330 dev_err(&dev->dev, 2331 "Absolute device without dev->absinfo, refusing to register\n"); 2332 return -EINVAL; 2333 } 2334 2335 if (dev->devres_managed) { 2336 devres = devres_alloc(devm_input_device_unregister, 2337 sizeof(*devres), GFP_KERNEL); 2338 if (!devres) 2339 return -ENOMEM; 2340 2341 devres->input = dev; 2342 } 2343 2344 /* Every input device generates EV_SYN/SYN_REPORT events. */ 2345 __set_bit(EV_SYN, dev->evbit); 2346 2347 /* KEY_RESERVED is not supposed to be transmitted to userspace. */ 2348 __clear_bit(KEY_RESERVED, dev->keybit); 2349 2350 /* Make sure that bitmasks not mentioned in dev->evbit are clean. */ 2351 input_cleanse_bitmasks(dev); 2352 2353 packet_size = input_estimate_events_per_packet(dev); 2354 if (dev->hint_events_per_packet < packet_size) 2355 dev->hint_events_per_packet = packet_size; 2356 2357 dev->max_vals = dev->hint_events_per_packet + 2; 2358 dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL); 2359 if (!dev->vals) { 2360 error = -ENOMEM; 2361 goto err_devres_free; 2362 } 2363 2364 /* 2365 * If delay and period are pre-set by the driver, then autorepeating 2366 * is handled by the driver itself and we don't do it in input.c. 2367 */ 2368 if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD]) 2369 input_enable_softrepeat(dev, 250, 33); 2370 2371 if (!dev->getkeycode) 2372 dev->getkeycode = input_default_getkeycode; 2373 2374 if (!dev->setkeycode) 2375 dev->setkeycode = input_default_setkeycode; 2376 2377 if (dev->poller) 2378 input_dev_poller_finalize(dev->poller); 2379 2380 error = device_add(&dev->dev); 2381 if (error) 2382 goto err_free_vals; 2383 2384 path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL); 2385 pr_info("%s as %s\n", 2386 dev->name ? dev->name : "Unspecified device", 2387 path ? path : "N/A"); 2388 kfree(path); 2389 2390 error = mutex_lock_interruptible(&input_mutex); 2391 if (error) 2392 goto err_device_del; 2393 2394 list_add_tail(&dev->node, &input_dev_list); 2395 2396 list_for_each_entry(handler, &input_handler_list, node) 2397 input_attach_handler(dev, handler); 2398 2399 input_wakeup_procfs_readers(); 2400 2401 mutex_unlock(&input_mutex); 2402 2403 if (dev->devres_managed) { 2404 dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n", 2405 __func__, dev_name(&dev->dev)); 2406 devres_add(dev->dev.parent, devres); 2407 } 2408 return 0; 2409 2410 err_device_del: 2411 device_del(&dev->dev); 2412 err_free_vals: 2413 kfree(dev->vals); 2414 dev->vals = NULL; 2415 err_devres_free: 2416 devres_free(devres); 2417 return error; 2418 } 2419 EXPORT_SYMBOL(input_register_device); 2420 2421 /** 2422 * input_unregister_device - unregister previously registered device 2423 * @dev: device to be unregistered 2424 * 2425 * This function unregisters an input device. Once device is unregistered 2426 * the caller should not try to access it as it may get freed at any moment. 2427 */ 2428 void input_unregister_device(struct input_dev *dev) 2429 { 2430 if (dev->devres_managed) { 2431 WARN_ON(devres_destroy(dev->dev.parent, 2432 devm_input_device_unregister, 2433 devm_input_device_match, 2434 dev)); 2435 __input_unregister_device(dev); 2436 /* 2437 * We do not do input_put_device() here because it will be done 2438 * when 2nd devres fires up. 2439 */ 2440 } else { 2441 __input_unregister_device(dev); 2442 input_put_device(dev); 2443 } 2444 } 2445 EXPORT_SYMBOL(input_unregister_device); 2446 2447 /** 2448 * input_register_handler - register a new input handler 2449 * @handler: handler to be registered 2450 * 2451 * This function registers a new input handler (interface) for input 2452 * devices in the system and attaches it to all input devices that 2453 * are compatible with the handler. 2454 */ 2455 int input_register_handler(struct input_handler *handler) 2456 { 2457 struct input_dev *dev; 2458 int error; 2459 2460 error = mutex_lock_interruptible(&input_mutex); 2461 if (error) 2462 return error; 2463 2464 INIT_LIST_HEAD(&handler->h_list); 2465 2466 list_add_tail(&handler->node, &input_handler_list); 2467 2468 list_for_each_entry(dev, &input_dev_list, node) 2469 input_attach_handler(dev, handler); 2470 2471 input_wakeup_procfs_readers(); 2472 2473 mutex_unlock(&input_mutex); 2474 return 0; 2475 } 2476 EXPORT_SYMBOL(input_register_handler); 2477 2478 /** 2479 * input_unregister_handler - unregisters an input handler 2480 * @handler: handler to be unregistered 2481 * 2482 * This function disconnects a handler from its input devices and 2483 * removes it from lists of known handlers. 2484 */ 2485 void input_unregister_handler(struct input_handler *handler) 2486 { 2487 struct input_handle *handle, *next; 2488 2489 mutex_lock(&input_mutex); 2490 2491 list_for_each_entry_safe(handle, next, &handler->h_list, h_node) 2492 handler->disconnect(handle); 2493 WARN_ON(!list_empty(&handler->h_list)); 2494 2495 list_del_init(&handler->node); 2496 2497 input_wakeup_procfs_readers(); 2498 2499 mutex_unlock(&input_mutex); 2500 } 2501 EXPORT_SYMBOL(input_unregister_handler); 2502 2503 /** 2504 * input_handler_for_each_handle - handle iterator 2505 * @handler: input handler to iterate 2506 * @data: data for the callback 2507 * @fn: function to be called for each handle 2508 * 2509 * Iterate over @bus's list of devices, and call @fn for each, passing 2510 * it @data and stop when @fn returns a non-zero value. The function is 2511 * using RCU to traverse the list and therefore may be using in atomic 2512 * contexts. The @fn callback is invoked from RCU critical section and 2513 * thus must not sleep. 2514 */ 2515 int input_handler_for_each_handle(struct input_handler *handler, void *data, 2516 int (*fn)(struct input_handle *, void *)) 2517 { 2518 struct input_handle *handle; 2519 int retval = 0; 2520 2521 rcu_read_lock(); 2522 2523 list_for_each_entry_rcu(handle, &handler->h_list, h_node) { 2524 retval = fn(handle, data); 2525 if (retval) 2526 break; 2527 } 2528 2529 rcu_read_unlock(); 2530 2531 return retval; 2532 } 2533 EXPORT_SYMBOL(input_handler_for_each_handle); 2534 2535 /** 2536 * input_register_handle - register a new input handle 2537 * @handle: handle to register 2538 * 2539 * This function puts a new input handle onto device's 2540 * and handler's lists so that events can flow through 2541 * it once it is opened using input_open_device(). 2542 * 2543 * This function is supposed to be called from handler's 2544 * connect() method. 2545 */ 2546 int input_register_handle(struct input_handle *handle) 2547 { 2548 struct input_handler *handler = handle->handler; 2549 struct input_dev *dev = handle->dev; 2550 int error; 2551 2552 /* 2553 * We take dev->mutex here to prevent race with 2554 * input_release_device(). 2555 */ 2556 error = mutex_lock_interruptible(&dev->mutex); 2557 if (error) 2558 return error; 2559 2560 /* 2561 * Filters go to the head of the list, normal handlers 2562 * to the tail. 2563 */ 2564 if (handler->filter) 2565 list_add_rcu(&handle->d_node, &dev->h_list); 2566 else 2567 list_add_tail_rcu(&handle->d_node, &dev->h_list); 2568 2569 mutex_unlock(&dev->mutex); 2570 2571 /* 2572 * Since we are supposed to be called from ->connect() 2573 * which is mutually exclusive with ->disconnect() 2574 * we can't be racing with input_unregister_handle() 2575 * and so separate lock is not needed here. 2576 */ 2577 list_add_tail_rcu(&handle->h_node, &handler->h_list); 2578 2579 if (handler->start) 2580 handler->start(handle); 2581 2582 return 0; 2583 } 2584 EXPORT_SYMBOL(input_register_handle); 2585 2586 /** 2587 * input_unregister_handle - unregister an input handle 2588 * @handle: handle to unregister 2589 * 2590 * This function removes input handle from device's 2591 * and handler's lists. 2592 * 2593 * This function is supposed to be called from handler's 2594 * disconnect() method. 2595 */ 2596 void input_unregister_handle(struct input_handle *handle) 2597 { 2598 struct input_dev *dev = handle->dev; 2599 2600 list_del_rcu(&handle->h_node); 2601 2602 /* 2603 * Take dev->mutex to prevent race with input_release_device(). 2604 */ 2605 mutex_lock(&dev->mutex); 2606 list_del_rcu(&handle->d_node); 2607 mutex_unlock(&dev->mutex); 2608 2609 synchronize_rcu(); 2610 } 2611 EXPORT_SYMBOL(input_unregister_handle); 2612 2613 /** 2614 * input_get_new_minor - allocates a new input minor number 2615 * @legacy_base: beginning or the legacy range to be searched 2616 * @legacy_num: size of legacy range 2617 * @allow_dynamic: whether we can also take ID from the dynamic range 2618 * 2619 * This function allocates a new device minor for from input major namespace. 2620 * Caller can request legacy minor by specifying @legacy_base and @legacy_num 2621 * parameters and whether ID can be allocated from dynamic range if there are 2622 * no free IDs in legacy range. 2623 */ 2624 int input_get_new_minor(int legacy_base, unsigned int legacy_num, 2625 bool allow_dynamic) 2626 { 2627 /* 2628 * This function should be called from input handler's ->connect() 2629 * methods, which are serialized with input_mutex, so no additional 2630 * locking is needed here. 2631 */ 2632 if (legacy_base >= 0) { 2633 int minor = ida_simple_get(&input_ida, 2634 legacy_base, 2635 legacy_base + legacy_num, 2636 GFP_KERNEL); 2637 if (minor >= 0 || !allow_dynamic) 2638 return minor; 2639 } 2640 2641 return ida_simple_get(&input_ida, 2642 INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES, 2643 GFP_KERNEL); 2644 } 2645 EXPORT_SYMBOL(input_get_new_minor); 2646 2647 /** 2648 * input_free_minor - release previously allocated minor 2649 * @minor: minor to be released 2650 * 2651 * This function releases previously allocated input minor so that it can be 2652 * reused later. 2653 */ 2654 void input_free_minor(unsigned int minor) 2655 { 2656 ida_simple_remove(&input_ida, minor); 2657 } 2658 EXPORT_SYMBOL(input_free_minor); 2659 2660 static int __init input_init(void) 2661 { 2662 int err; 2663 2664 err = class_register(&input_class); 2665 if (err) { 2666 pr_err("unable to register input_dev class\n"); 2667 return err; 2668 } 2669 2670 err = input_proc_init(); 2671 if (err) 2672 goto fail1; 2673 2674 err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0), 2675 INPUT_MAX_CHAR_DEVICES, "input"); 2676 if (err) { 2677 pr_err("unable to register char major %d", INPUT_MAJOR); 2678 goto fail2; 2679 } 2680 2681 return 0; 2682 2683 fail2: input_proc_exit(); 2684 fail1: class_unregister(&input_class); 2685 return err; 2686 } 2687 2688 static void __exit input_exit(void) 2689 { 2690 input_proc_exit(); 2691 unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0), 2692 INPUT_MAX_CHAR_DEVICES); 2693 class_unregister(&input_class); 2694 } 2695 2696 subsys_initcall(input_init); 2697 module_exit(input_exit); 2698