// SPDX-License-Identifier: GPL-2.0-only
/*
 * The input core
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */


#define pr_fmt(fmt) KBUILD_BASENAME ": " fmt

#include <linux/init.h>
#include <linux/types.h>
#include <linux/idr.h>
#include <linux/input/mt.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/pm.h>
#include <linux/poll.h>
#include <linux/device.h>
#include <linux/kstrtox.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include "input-compat.h"
#include "input-core-private.h"
#include "input-poller.h"

MODULE_AUTHOR("Vojtech Pavlik <vojtech@suse.cz>");
MODULE_DESCRIPTION("Input core");
MODULE_LICENSE("GPL");

#define INPUT_MAX_CHAR_DEVICES		1024
#define INPUT_FIRST_DYNAMIC_DEV		256
static DEFINE_IDA(input_ida);

static LIST_HEAD(input_dev_list);
static LIST_HEAD(input_handler_list);

/*
 * input_mutex protects access to both input_dev_list and input_handler_list.
 * This also causes input_[un]register_device and input_[un]register_handler
 * to be mutually exclusive which simplifies locking in drivers implementing
 * input handlers.
 */
static DEFINE_MUTEX(input_mutex);

static const struct input_value input_value_sync = { EV_SYN, SYN_REPORT, 1 };

static const unsigned int input_max_code[EV_CNT] = {
	[EV_KEY] = KEY_MAX,
	[EV_REL] = REL_MAX,
	[EV_ABS] = ABS_MAX,
	[EV_MSC] = MSC_MAX,
	[EV_SW] = SW_MAX,
	[EV_LED] = LED_MAX,
	[EV_SND] = SND_MAX,
	[EV_FF] = FF_MAX,
};

static inline int is_event_supported(unsigned int code,
				     unsigned long *bm, unsigned int max)
{
	return code <= max && test_bit(code, bm);
}

static int input_defuzz_abs_event(int value, int old_val, int fuzz)
{
	if (fuzz) {
		if (value > old_val - fuzz / 2 && value < old_val + fuzz / 2)
			return old_val;

		if (value > old_val - fuzz && value < old_val + fuzz)
			return (old_val * 3 + value) / 4;

		if (value > old_val - fuzz * 2 && value < old_val + fuzz * 2)
			return (old_val + value) / 2;
	}

	return value;
}

static void input_start_autorepeat(struct input_dev *dev, int code)
{
	if (test_bit(EV_REP, dev->evbit) &&
	    dev->rep[REP_PERIOD] && dev->rep[REP_DELAY] &&
	    dev->timer.function) {
		dev->repeat_key = code;
		mod_timer(&dev->timer,
			  jiffies + msecs_to_jiffies(dev->rep[REP_DELAY]));
	}
}

static void input_stop_autorepeat(struct input_dev *dev)
{
	del_timer(&dev->timer);
}

/*
 * Pass event first through all filters and then, if event has not been
 * filtered out, through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 */
static unsigned int input_to_handler(struct input_handle *handle,
				     struct input_value *vals,
				     unsigned int count)
{
	struct input_handler *handler = handle->handler;
	struct input_value *end = vals;
	struct input_value *v;

	if (handler->filter) {
		for (v = vals; v != vals + count; v++) {
			if (handler->filter(handle, v->type, v->code, v->value))
				continue;
			if (end != v)
				*end = *v;
			end++;
		}
		count = end - vals;
	}

	if (!count)
		return 0;

	if (handler->events)
		handler->events(handle, vals, count);
	else if (handler->event)
		for (v = vals; v != vals + count; v++)
			handler->event(handle, v->type, v->code, v->value);

	return count;
}

/*
 * Pass values first through all filters and then, if event has not been
 * filtered out, through all open handles. This function is called with
 * dev->event_lock held and interrupts disabled.
 */
static void input_pass_values(struct input_dev *dev,
			      struct input_value *vals, unsigned int count)
{
	struct input_handle *handle;
	struct input_value *v;

	lockdep_assert_held(&dev->event_lock);

	if (!count)
		return;

	rcu_read_lock();

	handle = rcu_dereference(dev->grab);
	if (handle) {
		count = input_to_handler(handle, vals, count);
	} else {
		list_for_each_entry_rcu(handle, &dev->h_list, d_node)
			if (handle->open) {
				count = input_to_handler(handle, vals, count);
				if (!count)
					break;
			}
	}

	rcu_read_unlock();

	/* trigger auto repeat for key events */
	if (test_bit(EV_REP, dev->evbit) && test_bit(EV_KEY, dev->evbit)) {
		for (v = vals; v != vals + count; v++) {
			if (v->type == EV_KEY && v->value != 2) {
				if (v->value)
					input_start_autorepeat(dev, v->code);
				else
					input_stop_autorepeat(dev);
			}
		}
	}
}

#define INPUT_IGNORE_EVENT	0
#define INPUT_PASS_TO_HANDLERS	1
#define INPUT_PASS_TO_DEVICE	2
#define INPUT_SLOT		4
#define INPUT_FLUSH		8
#define INPUT_PASS_TO_ALL	(INPUT_PASS_TO_HANDLERS | INPUT_PASS_TO_DEVICE)

static int input_handle_abs_event(struct input_dev *dev,
				  unsigned int code, int *pval)
{
	struct input_mt *mt = dev->mt;
	bool is_mt_event;
	int *pold;

	if (code == ABS_MT_SLOT) {
		/*
		 * "Stage" the event; we'll flush it later, when we
		 * get actual touch data.
		 */
		if (mt && *pval >= 0 && *pval < mt->num_slots)
			mt->slot = *pval;

		return INPUT_IGNORE_EVENT;
	}

	is_mt_event = input_is_mt_value(code);

	if (!is_mt_event) {
		pold = &dev->absinfo[code].value;
	} else if (mt) {
		pold = &mt->slots[mt->slot].abs[code - ABS_MT_FIRST];
	} else {
		/*
		 * Bypass filtering for multi-touch events when
		 * not employing slots.
		 */
		pold = NULL;
	}

	if (pold) {
		*pval = input_defuzz_abs_event(*pval, *pold,
						dev->absinfo[code].fuzz);
		if (*pold == *pval)
			return INPUT_IGNORE_EVENT;

		*pold = *pval;
	}

	/* Flush pending "slot" event */
	if (is_mt_event && mt && mt->slot != input_abs_get_val(dev, ABS_MT_SLOT)) {
		input_abs_set_val(dev, ABS_MT_SLOT, mt->slot);
		return INPUT_PASS_TO_HANDLERS | INPUT_SLOT;
	}

	return INPUT_PASS_TO_HANDLERS;
}

static int input_get_disposition(struct input_dev *dev,
				 unsigned int type, unsigned int code, int *pval)
{
	int disposition = INPUT_IGNORE_EVENT;
	int value = *pval;

	/* filter-out events from inhibited devices */
	if (dev->inhibited)
		return INPUT_IGNORE_EVENT;

	switch (type) {

	case EV_SYN:
		switch (code) {
		case SYN_CONFIG:
			disposition = INPUT_PASS_TO_ALL;
			break;

		case SYN_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS | INPUT_FLUSH;
			break;
		case SYN_MT_REPORT:
			disposition = INPUT_PASS_TO_HANDLERS;
			break;
		}
		break;

	case EV_KEY:
		if (is_event_supported(code, dev->keybit, KEY_MAX)) {

			/* auto-repeat bypasses state updates */
			if (value == 2) {
				disposition = INPUT_PASS_TO_HANDLERS;
				break;
			}

			if (!!test_bit(code, dev->key) != !!value) {

				__change_bit(code, dev->key);
				disposition = INPUT_PASS_TO_HANDLERS;
			}
		}
		break;

	case EV_SW:
		if (is_event_supported(code, dev->swbit, SW_MAX) &&
		    !!test_bit(code, dev->sw) != !!value) {

			__change_bit(code, dev->sw);
			disposition = INPUT_PASS_TO_HANDLERS;
		}
		break;

	case EV_ABS:
		if (is_event_supported(code, dev->absbit, ABS_MAX))
			disposition = input_handle_abs_event(dev, code, &value);

		break;

	case EV_REL:
		if (is_event_supported(code, dev->relbit, REL_MAX) && value)
			disposition = INPUT_PASS_TO_HANDLERS;

		break;

	case EV_MSC:
		if (is_event_supported(code, dev->mscbit, MSC_MAX))
			disposition = INPUT_PASS_TO_ALL;

		break;

	case EV_LED:
		if (is_event_supported(code, dev->ledbit, LED_MAX) &&
		    !!test_bit(code, dev->led) != !!value) {

			__change_bit(code, dev->led);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_SND:
		if (is_event_supported(code, dev->sndbit, SND_MAX)) {

			if (!!test_bit(code, dev->snd) != !!value)
				__change_bit(code, dev->snd);
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_REP:
		if (code <= REP_MAX && value >= 0 && dev->rep[code] != value) {
			dev->rep[code] = value;
			disposition = INPUT_PASS_TO_ALL;
		}
		break;

	case EV_FF:
		if (value >= 0)
			disposition = INPUT_PASS_TO_ALL;
		break;

	case EV_PWR:
		disposition = INPUT_PASS_TO_ALL;
		break;
	}

	*pval = value;
	return disposition;
}

static void input_event_dispose(struct input_dev *dev, int disposition,
				unsigned int type, unsigned int code, int value)
{
	if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
		dev->event(dev, type, code, value);

	if (!dev->vals)
		return;

	if (disposition & INPUT_PASS_TO_HANDLERS) {
		struct input_value *v;

		if (disposition & INPUT_SLOT) {
			v = &dev->vals[dev->num_vals++];
			v->type = EV_ABS;
			v->code = ABS_MT_SLOT;
			v->value = dev->mt->slot;
		}

		v = &dev->vals[dev->num_vals++];
		v->type = type;
		v->code = code;
		v->value = value;
	}
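
	/*
	 * Queued values are passed on to handlers either when a SYN_REPORT
	 * asks for a flush or, below, when the vals buffer is about to
	 * overflow, in which case a synthetic SYN_REPORT is appended first.
	 */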
	if (disposition & INPUT_FLUSH) {
		if (dev->num_vals >= 2)
			input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
		/*
		 * Reset the timestamp on flush so we won't end up
		 * with a stale one. Note we only need to reset the
		 * monotonic one as we use its presence when deciding
		 * whether to generate a synthetic timestamp.
		 */
		dev->timestamp[INPUT_CLK_MONO] = ktime_set(0, 0);
	} else if (dev->num_vals >= dev->max_vals - 2) {
		dev->vals[dev->num_vals++] = input_value_sync;
		input_pass_values(dev, dev->vals, dev->num_vals);
		dev->num_vals = 0;
	}
}

void input_handle_event(struct input_dev *dev,
			unsigned int type, unsigned int code, int value)
{
	int disposition;

	lockdep_assert_held(&dev->event_lock);

	disposition = input_get_disposition(dev, type, code, &value);
	if (disposition != INPUT_IGNORE_EVENT) {
		if (type != EV_SYN)
			add_input_randomness(type, code, value);

		input_event_dispose(dev, disposition, type, code, value);
	}
}

/**
 * input_event() - report new input event
 * @dev: device that generated the event
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * This function should be used by drivers implementing various input
 * devices to report input events. See also input_inject_event().
 *
 * NOTE: input_event() may be safely used right after input device was
 * allocated with input_allocate_device(), even before it is registered
 * with input_register_device(), but the event will not reach any of the
 * input handlers. Such early invocation of input_event() may be used
 * to 'seed' initial state of a switch or initial position of absolute
 * axis, etc.
 */
void input_event(struct input_dev *dev,
		 unsigned int type, unsigned int code, int value)
{
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {

		spin_lock_irqsave(&dev->event_lock, flags);
		input_handle_event(dev, type, code, value);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_event);

/**
 * input_inject_event() - send input event from input handler
 * @handle: input handle to send event through
 * @type: type of the event
 * @code: event code
 * @value: value of the event
 *
 * Similar to input_event() but will ignore event if device is
 * "grabbed" and handle injecting event is not the one that owns
 * the device.
 */
void input_inject_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grab;
	unsigned long flags;

	if (is_event_supported(type, dev->evbit, EV_MAX)) {
		spin_lock_irqsave(&dev->event_lock, flags);

		rcu_read_lock();
		grab = rcu_dereference(dev->grab);
		if (!grab || grab == handle)
			input_handle_event(dev, type, code, value);
		rcu_read_unlock();

		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}
EXPORT_SYMBOL(input_inject_event);

/**
 * input_alloc_absinfo - allocates array of input_absinfo structs
 * @dev: the input device emitting absolute events
 *
 * If the absinfo struct the caller asked for is already allocated, this
 * function will not do anything.
 */
void input_alloc_absinfo(struct input_dev *dev)
{
	if (dev->absinfo)
		return;

	dev->absinfo = kcalloc(ABS_CNT, sizeof(*dev->absinfo), GFP_KERNEL);
	if (!dev->absinfo) {
		dev_err(dev->dev.parent ?: &dev->dev,
			"%s: unable to allocate memory\n", __func__);
		/*
		 * We will handle this allocation failure in
		 * input_register_device() when we refuse to register input
		 * device with ABS bits but without absinfo.
		 */
	}
}
EXPORT_SYMBOL(input_alloc_absinfo);

void input_set_abs_params(struct input_dev *dev, unsigned int axis,
			  int min, int max, int fuzz, int flat)
{
	struct input_absinfo *absinfo;

	__set_bit(EV_ABS, dev->evbit);
	__set_bit(axis, dev->absbit);

	input_alloc_absinfo(dev);
	if (!dev->absinfo)
		return;

	absinfo = &dev->absinfo[axis];
	absinfo->minimum = min;
	absinfo->maximum = max;
	absinfo->fuzz = fuzz;
	absinfo->flat = flat;
}
EXPORT_SYMBOL(input_set_abs_params);

/**
 * input_copy_abs - Copy absinfo from one input_dev to another
 * @dst: Destination input device to copy the abs settings to
 * @dst_axis: ABS_* value selecting the destination axis
 * @src: Source input device to copy the abs settings from
 * @src_axis: ABS_* value selecting the source axis
 *
 * Set absinfo for the selected destination axis by copying it from
 * the specified source input device's source axis.
 * This is useful to e.g. setup a pen/stylus input-device for combined
 * touchscreen/pen hardware where the pen uses the same coordinates as
 * the touchscreen.
 */
void input_copy_abs(struct input_dev *dst, unsigned int dst_axis,
		    const struct input_dev *src, unsigned int src_axis)
{
	/* src must have EV_ABS and src_axis set */
	if (WARN_ON(!(test_bit(EV_ABS, src->evbit) &&
		      test_bit(src_axis, src->absbit))))
		return;

	/*
	 * input_alloc_absinfo() may have failed for the source. Our caller is
	 * expected to catch this when registering the input devices, which may
	 * happen after the input_copy_abs() call.
	 */
	if (!src->absinfo)
		return;

	input_set_capability(dst, EV_ABS, dst_axis);
	if (!dst->absinfo)
		return;

	dst->absinfo[dst_axis] = src->absinfo[src_axis];
}
EXPORT_SYMBOL(input_copy_abs);

/**
 * input_grab_device - grabs device for exclusive use
 * @handle: input handle that wants to own the device
 *
 * When a device is grabbed by an input handle all events generated by
 * the device are delivered only to this handle. Also events injected
 * by other input handles are ignored while device is grabbed.
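 *
 * Illustrative sketch (not part of the original file): a handler that
 * needs exclusive access, for example while servicing an EVIOCGRAB-style
 * request from userspace, might do:
 *
 *	error = input_grab_device(handle);
 *	if (error)
 *		return error;
 *	...
 *	input_release_device(handle);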
 */
int input_grab_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->grab) {
		retval = -EBUSY;
		goto out;
	}

	rcu_assign_pointer(dev->grab, handle);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_grab_device);

static void __input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	struct input_handle *grabber;

	grabber = rcu_dereference_protected(dev->grab,
					    lockdep_is_held(&dev->mutex));
	if (grabber == handle) {
		rcu_assign_pointer(dev->grab, NULL);
		/* Make sure input_pass_values() notices that grab is gone */
		synchronize_rcu();

		list_for_each_entry(handle, &dev->h_list, d_node)
			if (handle->open && handle->handler->start)
				handle->handler->start(handle);
	}
}

/**
 * input_release_device - release previously grabbed device
 * @handle: input handle that owns the device
 *
 * Releases previously grabbed device so that other input handles can
 * start receiving input events. Upon release all handlers attached
 * to the device have their start() method called so they have a chance
 * to synchronize device state with the rest of the system.
 */
void input_release_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);
	__input_release_device(handle);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_release_device);

/**
 * input_open_device - open input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to start receiving events from the given input device.
 */
int input_open_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->going_away) {
		retval = -ENODEV;
		goto out;
	}

	handle->open++;

	if (dev->users++ || dev->inhibited) {
		/*
		 * Device is already opened and/or inhibited,
		 * so we can exit immediately and report success.
		 */
		goto out;
	}

	if (dev->open) {
		retval = dev->open(dev);
		if (retval) {
			dev->users--;
			handle->open--;
			/*
			 * Make sure we are not delivering any more events
			 * through this handle
			 */
			synchronize_rcu();
			goto out;
		}
	}

	if (dev->poller)
		input_dev_poller_start(dev->poller);

 out:
	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_open_device);

int input_flush_device(struct input_handle *handle, struct file *file)
{
	struct input_dev *dev = handle->dev;
	int retval;

	retval = mutex_lock_interruptible(&dev->mutex);
	if (retval)
		return retval;

	if (dev->flush)
		retval = dev->flush(dev, file);

	mutex_unlock(&dev->mutex);
	return retval;
}
EXPORT_SYMBOL(input_flush_device);

/**
 * input_close_device - close input device
 * @handle: handle through which device is being accessed
 *
 * This function should be called by input handlers when they
 * want to stop receiving events from the given input device.
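 *
 * Illustrative sketch (not part of the original file): a handle opened
 * from a handler's connect() is typically closed again in disconnect():
 *
 *	input_close_device(handle);
 *	input_unregister_handle(handle);
 *	kfree(handle);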
 */
void input_close_device(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	mutex_lock(&dev->mutex);

	__input_release_device(handle);

	if (!dev->inhibited && !--dev->users) {
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
		if (dev->close)
			dev->close(dev);
	}

	if (!--handle->open) {
		/*
		 * synchronize_rcu() makes sure that input_pass_values()
		 * completed and that no more input events are delivered
		 * through this handle
		 */
		synchronize_rcu();
	}

	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_close_device);

/*
 * Simulate keyup events for all keys that are marked as pressed.
 * The function must be called with dev->event_lock held.
 */
static bool input_dev_release_keys(struct input_dev *dev)
{
	bool need_sync = false;
	int code;

	lockdep_assert_held(&dev->event_lock);

	if (is_event_supported(EV_KEY, dev->evbit, EV_MAX)) {
		for_each_set_bit(code, dev->key, KEY_CNT) {
			input_handle_event(dev, EV_KEY, code, 0);
			need_sync = true;
		}
	}

	return need_sync;
}

/*
 * Prepare device for unregistering
 */
static void input_disconnect_device(struct input_dev *dev)
{
	struct input_handle *handle;

	/*
	 * Mark device as going away. Note that we take dev->mutex here
	 * not to protect access to dev->going_away but rather to ensure
	 * that there are no threads in the middle of input_open_device()
	 */
	mutex_lock(&dev->mutex);
	dev->going_away = true;
	mutex_unlock(&dev->mutex);

	spin_lock_irq(&dev->event_lock);

	/*
	 * Simulate keyup events for all pressed keys so that handlers
	 * are not left with "stuck" keys. The driver may continue to
	 * generate events even after we are done here but they will not
	 * reach any handlers.
	 */
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

	list_for_each_entry(handle, &dev->h_list, d_node)
		handle->open = 0;

	spin_unlock_irq(&dev->event_lock);
}

/**
 * input_scancode_to_scalar() - converts scancode in &struct input_keymap_entry
 * @ke: keymap entry containing scancode to be converted.
 * @scancode: pointer to the location where converted scancode should
 *	be stored.
 *
 * This function is used to convert scancode stored in &struct input_keymap_entry
 * into scalar form understood by legacy keymap handling methods. These
 * methods expect scancodes to be represented as 'unsigned int'.
 */
int input_scancode_to_scalar(const struct input_keymap_entry *ke,
			     unsigned int *scancode)
{
	switch (ke->len) {
	case 1:
		*scancode = *((u8 *)ke->scancode);
		break;

	case 2:
		*scancode = *((u16 *)ke->scancode);
		break;

	case 4:
		*scancode = *((u32 *)ke->scancode);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL(input_scancode_to_scalar);

/*
 * Those routines handle the default case where no [gs]etkeycode() is
 * defined. In this case, an array indexed by the scancode is used.
 */

static unsigned int input_fetch_keycode(struct input_dev *dev,
					unsigned int index)
{
	switch (dev->keycodesize) {
	case 1:
		return ((u8 *)dev->keycode)[index];

	case 2:
		return ((u16 *)dev->keycode)[index];

	default:
		return ((u32 *)dev->keycode)[index];
	}
}

static int input_default_getkeycode(struct input_dev *dev,
				    struct input_keymap_entry *ke)
{
	unsigned int index;
	int error;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX)
		index = ke->index;
	else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	ke->keycode = input_fetch_keycode(dev, index);
	ke->index = index;
	ke->len = sizeof(index);
	memcpy(ke->scancode, &index, sizeof(index));

	return 0;
}

static int input_default_setkeycode(struct input_dev *dev,
				    const struct input_keymap_entry *ke,
				    unsigned int *old_keycode)
{
	unsigned int index;
	int error;
	int i;

	if (!dev->keycodesize)
		return -EINVAL;

	if (ke->flags & INPUT_KEYMAP_BY_INDEX) {
		index = ke->index;
	} else {
		error = input_scancode_to_scalar(ke, &index);
		if (error)
			return error;
	}

	if (index >= dev->keycodemax)
		return -EINVAL;

	if (dev->keycodesize < sizeof(ke->keycode) &&
			(ke->keycode >> (dev->keycodesize * 8)))
		return -EINVAL;

	switch (dev->keycodesize) {
	case 1: {
		u8 *k = (u8 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	case 2: {
		u16 *k = (u16 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	default: {
		u32 *k = (u32 *)dev->keycode;
		*old_keycode = k[index];
		k[index] = ke->keycode;
		break;
	}
	}

	if (*old_keycode <= KEY_MAX) {
		__clear_bit(*old_keycode, dev->keybit);
		for (i = 0; i < dev->keycodemax; i++) {
			if (input_fetch_keycode(dev, i) == *old_keycode) {
				__set_bit(*old_keycode, dev->keybit);
				/* Setting the bit twice is useless, so break */
				break;
			}
		}
	}

	__set_bit(ke->keycode, dev->keybit);
	return 0;
}

/**
 * input_get_keycode - retrieve keycode currently mapped to a given scancode
 * @dev: input device which keymap is being queried
 * @ke: keymap entry
 *
 * This function should be called by anyone interested in retrieving current
 * keymap. Presently evdev handlers use it.
 */
int input_get_keycode(struct input_dev *dev, struct input_keymap_entry *ke)
{
	unsigned long flags;
	int retval;

	spin_lock_irqsave(&dev->event_lock, flags);
	retval = dev->getkeycode(dev, ke);
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_get_keycode);

/**
 * input_set_keycode - attribute a keycode to a given scancode
 * @dev: input device which keymap is being updated
 * @ke: new keymap entry
 *
 * This function should be called by anyone needing to update current
 * keymap. Presently keyboard and evdev handlers use it.
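 *
 * Illustrative sketch (not part of the original file): remapping scancode
 * 0x1e to KEY_B could be expressed as:
 *
 *	struct input_keymap_entry ke = {
 *		.keycode = KEY_B,
 *		.len = sizeof(u32),
 *	};
 *	u32 scancode = 0x1e;
 *
 *	memcpy(ke.scancode, &scancode, sizeof(scancode));
 *	error = input_set_keycode(dev, &ke);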
 */
int input_set_keycode(struct input_dev *dev,
		      const struct input_keymap_entry *ke)
{
	unsigned long flags;
	unsigned int old_keycode;
	int retval;

	if (ke->keycode > KEY_MAX)
		return -EINVAL;

	spin_lock_irqsave(&dev->event_lock, flags);

	retval = dev->setkeycode(dev, ke, &old_keycode);
	if (retval)
		goto out;

	/* Make sure KEY_RESERVED did not get enabled. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/*
	 * Simulate keyup event if keycode is not present
	 * in the keymap anymore
	 */
	if (old_keycode > KEY_MAX) {
		dev_warn(dev->dev.parent ?: &dev->dev,
			 "%s: got too big old keycode %#x\n",
			 __func__, old_keycode);
	} else if (test_bit(EV_KEY, dev->evbit) &&
		   !is_event_supported(old_keycode, dev->keybit, KEY_MAX) &&
		   __test_and_clear_bit(old_keycode, dev->key)) {
		/*
		 * We have to use input_event_dispose() here directly instead
		 * of input_handle_event() because the key we want to release
		 * here is considered no longer supported by the device and
		 * input_handle_event() will ignore it.
		 */
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS,
				    EV_KEY, old_keycode, 0);
		input_event_dispose(dev, INPUT_PASS_TO_HANDLERS | INPUT_FLUSH,
				    EV_SYN, SYN_REPORT, 1);
	}

 out:
	spin_unlock_irqrestore(&dev->event_lock, flags);

	return retval;
}
EXPORT_SYMBOL(input_set_keycode);

bool input_match_device_id(const struct input_dev *dev,
			   const struct input_device_id *id)
{
	if (id->flags & INPUT_DEVICE_ID_MATCH_BUS)
		if (id->bustype != dev->id.bustype)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VENDOR)
		if (id->vendor != dev->id.vendor)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_PRODUCT)
		if (id->product != dev->id.product)
			return false;

	if (id->flags & INPUT_DEVICE_ID_MATCH_VERSION)
		if (id->version != dev->id.version)
			return false;

	if (!bitmap_subset(id->evbit, dev->evbit, EV_MAX) ||
	    !bitmap_subset(id->keybit, dev->keybit, KEY_MAX) ||
	    !bitmap_subset(id->relbit, dev->relbit, REL_MAX) ||
	    !bitmap_subset(id->absbit, dev->absbit, ABS_MAX) ||
	    !bitmap_subset(id->mscbit, dev->mscbit, MSC_MAX) ||
	    !bitmap_subset(id->ledbit, dev->ledbit, LED_MAX) ||
	    !bitmap_subset(id->sndbit, dev->sndbit, SND_MAX) ||
	    !bitmap_subset(id->ffbit, dev->ffbit, FF_MAX) ||
	    !bitmap_subset(id->swbit, dev->swbit, SW_MAX) ||
	    !bitmap_subset(id->propbit, dev->propbit, INPUT_PROP_MAX)) {
		return false;
	}

	return true;
}
EXPORT_SYMBOL(input_match_device_id);

static const struct input_device_id *input_match_device(struct input_handler *handler,
							 struct input_dev *dev)
{
	const struct input_device_id *id;

	for (id = handler->id_table; id->flags || id->driver_info; id++) {
		if (input_match_device_id(dev, id) &&
		    (!handler->match || handler->match(handler, dev))) {
			return id;
		}
	}

	return NULL;
}

static int input_attach_handler(struct input_dev *dev, struct input_handler *handler)
{
	const struct input_device_id *id;
	int error;

	id = input_match_device(handler, dev);
	if (!id)
		return -ENODEV;

	error = handler->connect(handler, dev, id);
	if (error && error != -ENODEV)
		pr_err("failed to attach handler %s to device %s, error: %d\n",
		       handler->name, kobject_name(&dev->dev.kobj), error);
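
	/*
	 * Note: -ENODEV from ->connect() is deliberately not logged above;
	 * it simply means the handler declined this particular device.
	 */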
	return error;
}

#ifdef CONFIG_COMPAT

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	int len = 0;

	if (in_compat_syscall()) {
		u32 dword = bits >> 32;
		if (dword || !skip_empty)
			len += snprintf(buf, buf_size, "%x ", dword);

		dword = bits & 0xffffffffUL;
		if (dword || !skip_empty || len)
			len += snprintf(buf + len, max(buf_size - len, 0),
					"%x", dword);
	} else {
		if (bits || !skip_empty)
			len += snprintf(buf, buf_size, "%lx", bits);
	}

	return len;
}

#else /* !CONFIG_COMPAT */

static int input_bits_to_string(char *buf, int buf_size,
				unsigned long bits, bool skip_empty)
{
	return bits || !skip_empty ?
		snprintf(buf, buf_size, "%lx", bits) : 0;
}

#endif

#ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_bus_input_dir;
static DECLARE_WAIT_QUEUE_HEAD(input_devices_poll_wait);
static int input_devices_state;

static inline void input_wakeup_procfs_readers(void)
{
	input_devices_state++;
	wake_up(&input_devices_poll_wait);
}

static __poll_t input_proc_devices_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &input_devices_poll_wait, wait);
	if (file->f_version != input_devices_state) {
		file->f_version = input_devices_state;
		return EPOLLIN | EPOLLRDNORM;
	}

	return 0;
}

union input_seq_state {
	struct {
		unsigned short pos;
		bool mutex_acquired;
	};
	void *p;
};

static void *input_devices_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;

	return seq_list_start(&input_dev_list, *pos);
}

static void *input_devices_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	return seq_list_next(v, &input_dev_list, pos);
}

static void input_seq_stop(struct seq_file *seq, void *v)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	if (state->mutex_acquired)
		mutex_unlock(&input_mutex);
}

static void input_seq_print_bitmap(struct seq_file *seq, const char *name,
				   unsigned long *bitmap, int max)
{
	int i;
	bool skip_empty = true;
	char buf[18];

	seq_printf(seq, "B: %s=", name);

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		if (input_bits_to_string(buf, sizeof(buf),
					 bitmap[i], skip_empty)) {
			skip_empty = false;
			seq_printf(seq, "%s%s", buf, i > 0 ? " " : "");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (skip_empty)
		seq_putc(seq, '0');

	seq_putc(seq, '\n');
}

static int input_devices_seq_show(struct seq_file *seq, void *v)
{
	struct input_dev *dev = container_of(v, struct input_dev, node);
	const char *path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	struct input_handle *handle;

	seq_printf(seq, "I: Bus=%04x Vendor=%04x Product=%04x Version=%04x\n",
		   dev->id.bustype, dev->id.vendor, dev->id.product, dev->id.version);

	seq_printf(seq, "N: Name=\"%s\"\n", dev->name ? dev->name : "");
	seq_printf(seq, "P: Phys=%s\n", dev->phys ? dev->phys : "");
	seq_printf(seq, "S: Sysfs=%s\n", path ? path : "");
	seq_printf(seq, "U: Uniq=%s\n", dev->uniq ? dev->uniq : "");
	seq_puts(seq, "H: Handlers=");

	list_for_each_entry(handle, &dev->h_list, d_node)
		seq_printf(seq, "%s ", handle->name);
	seq_putc(seq, '\n');

	input_seq_print_bitmap(seq, "PROP", dev->propbit, INPUT_PROP_MAX);

	input_seq_print_bitmap(seq, "EV", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		input_seq_print_bitmap(seq, "KEY", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		input_seq_print_bitmap(seq, "REL", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		input_seq_print_bitmap(seq, "ABS", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		input_seq_print_bitmap(seq, "MSC", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		input_seq_print_bitmap(seq, "LED", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		input_seq_print_bitmap(seq, "SND", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		input_seq_print_bitmap(seq, "FF", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		input_seq_print_bitmap(seq, "SW", dev->swbit, SW_MAX);

	seq_putc(seq, '\n');

	kfree(path);
	return 0;
}

static const struct seq_operations input_devices_seq_ops = {
	.start	= input_devices_seq_start,
	.next	= input_devices_seq_next,
	.stop	= input_seq_stop,
	.show	= input_devices_seq_show,
};

static int input_proc_devices_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_devices_seq_ops);
}

static const struct proc_ops input_devices_proc_ops = {
	.proc_open	= input_proc_devices_open,
	.proc_poll	= input_proc_devices_poll,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static void *input_handlers_seq_start(struct seq_file *seq, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;
	int error;

	/* We need to fit into seq->private pointer */
	BUILD_BUG_ON(sizeof(union input_seq_state) != sizeof(seq->private));

	error = mutex_lock_interruptible(&input_mutex);
	if (error) {
		state->mutex_acquired = false;
		return ERR_PTR(error);
	}

	state->mutex_acquired = true;
	state->pos = *pos;

	return seq_list_start(&input_handler_list, *pos);
}

static void *input_handlers_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	state->pos = *pos + 1;
	return seq_list_next(v, &input_handler_list, pos);
}

static int input_handlers_seq_show(struct seq_file *seq, void *v)
{
	struct input_handler *handler = container_of(v, struct input_handler, node);
	union input_seq_state *state = (union input_seq_state *)&seq->private;

	seq_printf(seq, "N: Number=%u Name=%s", state->pos, handler->name);
	if (handler->filter)
		seq_puts(seq, " (filter)");
	if (handler->legacy_minors)
		seq_printf(seq, " Minor=%d", handler->minor);
	seq_putc(seq, '\n');

	return 0;
}

static const struct seq_operations input_handlers_seq_ops = {
	.start	= input_handlers_seq_start,
	.next	= input_handlers_seq_next,
	.stop	= input_seq_stop,
	.show	= input_handlers_seq_show,
};

static int input_proc_handlers_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &input_handlers_seq_ops);
}

static const struct proc_ops input_handlers_proc_ops = {
	.proc_open	= input_proc_handlers_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init input_proc_init(void)
{
	struct proc_dir_entry *entry;

	proc_bus_input_dir = proc_mkdir("bus/input", NULL);
	if (!proc_bus_input_dir)
		return -ENOMEM;

	entry = proc_create("devices", 0, proc_bus_input_dir,
			    &input_devices_proc_ops);
	if (!entry)
		goto fail1;

	entry = proc_create("handlers", 0, proc_bus_input_dir,
			    &input_handlers_proc_ops);
	if (!entry)
		goto fail2;

	return 0;

 fail2:	remove_proc_entry("devices", proc_bus_input_dir);
 fail1:	remove_proc_entry("bus/input", NULL);
	return -ENOMEM;
}

static void input_proc_exit(void)
{
	remove_proc_entry("devices", proc_bus_input_dir);
	remove_proc_entry("handlers", proc_bus_input_dir);
	remove_proc_entry("bus/input", NULL);
}

#else /* !CONFIG_PROC_FS */
static inline void input_wakeup_procfs_readers(void) { }
static inline int input_proc_init(void) { return 0; }
static inline void input_proc_exit(void) { }
#endif

#define INPUT_DEV_STRING_ATTR_SHOW(name)				\
static ssize_t input_dev_show_##name(struct device *dev,		\
				     struct device_attribute *attr,	\
				     char *buf)				\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
									\
	return scnprintf(buf, PAGE_SIZE, "%s\n",			\
			 input_dev->name ? input_dev->name : "");	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_##name, NULL)

INPUT_DEV_STRING_ATTR_SHOW(name);
INPUT_DEV_STRING_ATTR_SHOW(phys);
INPUT_DEV_STRING_ATTR_SHOW(uniq);

static int input_print_modalias_bits(char *buf, int size,
				     char name, const unsigned long *bm,
				     unsigned int min_bit, unsigned int max_bit)
{
	int len = 0, i;

	len += snprintf(buf, max(size, 0), "%c", name);
	for (i = min_bit; i < max_bit; i++)
		if (bm[BIT_WORD(i)] & BIT_MASK(i))
			len += snprintf(buf + len, max(size - len, 0), "%X,", i);
	return len;
}

static int input_print_modalias(char *buf, int size, const struct input_dev *id,
				int add_cr)
{
	int len;

	len = snprintf(buf, max(size, 0),
		       "input:b%04Xv%04Xp%04Xe%04X-",
		       id->id.bustype, id->id.vendor,
		       id->id.product, id->id.version);

	len += input_print_modalias_bits(buf + len, size - len,
				'e', id->evbit, 0, EV_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'k', id->keybit, KEY_MIN_INTERESTING, KEY_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'r', id->relbit, 0, REL_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'a', id->absbit, 0, ABS_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'm', id->mscbit, 0, MSC_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'l', id->ledbit, 0, LED_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				's', id->sndbit, 0, SND_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'f', id->ffbit, 0, FF_MAX);
	len += input_print_modalias_bits(buf + len, size - len,
				'w', id->swbit, 0, SW_MAX);

	if (add_cr)
		len += snprintf(buf + len, max(size - len, 0), "\n");

	return len;
}

static ssize_t input_dev_show_modalias(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct input_dev *id = to_input_dev(dev);
	ssize_t len;

	len = input_print_modalias(buf, PAGE_SIZE, id, 1);

	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(modalias, S_IRUGO, input_dev_show_modalias, NULL);

static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
			      int max, int add_cr);

static ssize_t input_dev_show_properties(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);
	int len = input_print_bitmap(buf, PAGE_SIZE, input_dev->propbit,
				     INPUT_PROP_MAX, true);
	return min_t(int, len, PAGE_SIZE);
}
static DEVICE_ATTR(properties, S_IRUGO, input_dev_show_properties, NULL);

static int input_inhibit_device(struct input_dev *dev);
static int input_uninhibit_device(struct input_dev *dev);

static ssize_t inhibited_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct input_dev *input_dev = to_input_dev(dev);

	return scnprintf(buf, PAGE_SIZE, "%d\n", input_dev->inhibited);
}

static ssize_t inhibited_store(struct device *dev,
			       struct device_attribute *attr, const char *buf,
			       size_t len)
{
	struct input_dev *input_dev = to_input_dev(dev);
	ssize_t rv;
	bool inhibited;

	if (kstrtobool(buf, &inhibited))
		return -EINVAL;

	if (inhibited)
		rv = input_inhibit_device(input_dev);
	else
		rv = input_uninhibit_device(input_dev);

	if (rv != 0)
		return rv;

	return len;
}

static DEVICE_ATTR_RW(inhibited);

static struct attribute *input_dev_attrs[] = {
	&dev_attr_name.attr,
	&dev_attr_phys.attr,
	&dev_attr_uniq.attr,
	&dev_attr_modalias.attr,
	&dev_attr_properties.attr,
	&dev_attr_inhibited.attr,
	NULL
};

static const struct attribute_group input_dev_attr_group = {
	.attrs	= input_dev_attrs,
};

#define INPUT_DEV_ID_ATTR(name)						\
static ssize_t input_dev_show_id_##name(struct device *dev,		\
					struct device_attribute *attr,	\
					char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	return scnprintf(buf, PAGE_SIZE, "%04x\n", input_dev->id.name);	\
}									\
static DEVICE_ATTR(name, S_IRUGO, input_dev_show_id_##name, NULL)

INPUT_DEV_ID_ATTR(bustype);
INPUT_DEV_ID_ATTR(vendor);
INPUT_DEV_ID_ATTR(product);
INPUT_DEV_ID_ATTR(version);

static struct attribute *input_dev_id_attrs[] = {
	&dev_attr_bustype.attr,
	&dev_attr_vendor.attr,
	&dev_attr_product.attr,
	&dev_attr_version.attr,
	NULL
};

static const struct attribute_group input_dev_id_attr_group = {
	.name	= "id",
	.attrs	= input_dev_id_attrs,
};

static int input_print_bitmap(char *buf, int buf_size, const unsigned long *bitmap,
			      int max, int add_cr)
{
	int i;
	int len = 0;
	bool skip_empty = true;

	for (i = BITS_TO_LONGS(max) - 1; i >= 0; i--) {
		len += input_bits_to_string(buf + len, max(buf_size - len, 0),
					    bitmap[i], skip_empty);
		if (len) {
			skip_empty = false;
			if (i > 0)
				len += snprintf(buf + len, max(buf_size - len, 0), " ");
		}
	}

	/*
	 * If no output was produced print a single 0.
	 */
	if (len == 0)
		len = snprintf(buf, buf_size, "%d", 0);

	if (add_cr)
		len += snprintf(buf + len, max(buf_size - len, 0), "\n");

	return len;
}

#define INPUT_DEV_CAP_ATTR(ev, bm)					\
static ssize_t input_dev_show_cap_##bm(struct device *dev,		\
				       struct device_attribute *attr,	\
				       char *buf)			\
{									\
	struct input_dev *input_dev = to_input_dev(dev);		\
	int len = input_print_bitmap(buf, PAGE_SIZE,			\
				     input_dev->bm##bit, ev##_MAX,	\
				     true);				\
	return min_t(int, len, PAGE_SIZE);				\
}									\
static DEVICE_ATTR(bm, S_IRUGO, input_dev_show_cap_##bm, NULL)

INPUT_DEV_CAP_ATTR(EV, ev);
INPUT_DEV_CAP_ATTR(KEY, key);
INPUT_DEV_CAP_ATTR(REL, rel);
INPUT_DEV_CAP_ATTR(ABS, abs);
INPUT_DEV_CAP_ATTR(MSC, msc);
INPUT_DEV_CAP_ATTR(LED, led);
INPUT_DEV_CAP_ATTR(SND, snd);
INPUT_DEV_CAP_ATTR(FF, ff);
INPUT_DEV_CAP_ATTR(SW, sw);

static struct attribute *input_dev_caps_attrs[] = {
	&dev_attr_ev.attr,
	&dev_attr_key.attr,
	&dev_attr_rel.attr,
	&dev_attr_abs.attr,
	&dev_attr_msc.attr,
	&dev_attr_led.attr,
	&dev_attr_snd.attr,
	&dev_attr_ff.attr,
	&dev_attr_sw.attr,
	NULL
};

static const struct attribute_group input_dev_caps_attr_group = {
	.name	= "capabilities",
	.attrs	= input_dev_caps_attrs,
};

static const struct attribute_group *input_dev_attr_groups[] = {
	&input_dev_attr_group,
	&input_dev_id_attr_group,
	&input_dev_caps_attr_group,
	&input_poller_attribute_group,
	NULL
};

static void input_dev_release(struct device *device)
{
	struct input_dev *dev = to_input_dev(device);

	input_ff_destroy(dev);
	input_mt_destroy_slots(dev);
	kfree(dev->poller);
	kfree(dev->absinfo);
	kfree(dev->vals);
	kfree(dev);

	module_put(THIS_MODULE);
}

/*
 * Input uevent interface - loading event handlers based on
 * device bitfields.
 */
static int input_add_uevent_bm_var(struct kobj_uevent_env *env,
				   const char *name, const unsigned long *bitmap, int max)
{
	int len;

	if (add_uevent_var(env, "%s", name))
		return -ENOMEM;

	len = input_print_bitmap(&env->buf[env->buflen - 1],
				 sizeof(env->buf) - env->buflen,
				 bitmap, max, false);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

static int input_add_uevent_modalias_var(struct kobj_uevent_env *env,
					 const struct input_dev *dev)
{
	int len;

	if (add_uevent_var(env, "MODALIAS="))
		return -ENOMEM;

	len = input_print_modalias(&env->buf[env->buflen - 1],
				   sizeof(env->buf) - env->buflen,
				   dev, 0);
	if (len >= (sizeof(env->buf) - env->buflen))
		return -ENOMEM;

	env->buflen += len;
	return 0;
}

#define INPUT_ADD_HOTPLUG_VAR(fmt, val...)				\
	do {								\
		int err = add_uevent_var(env, fmt, val);		\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_BM_VAR(name, bm, max)				\
	do {								\
		int err = input_add_uevent_bm_var(env, name, bm, max);	\
		if (err)						\
			return err;					\
	} while (0)

#define INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev)				\
	do {								\
		int err = input_add_uevent_modalias_var(env, dev);	\
		if (err)						\
			return err;					\
	} while (0)

static int input_dev_uevent(const struct device *device, struct kobj_uevent_env *env)
{
	const struct input_dev *dev = to_input_dev(device);

	INPUT_ADD_HOTPLUG_VAR("PRODUCT=%x/%x/%x/%x",
				dev->id.bustype, dev->id.vendor,
				dev->id.product, dev->id.version);
	if (dev->name)
		INPUT_ADD_HOTPLUG_VAR("NAME=\"%s\"", dev->name);
	if (dev->phys)
		INPUT_ADD_HOTPLUG_VAR("PHYS=\"%s\"", dev->phys);
	if (dev->uniq)
		INPUT_ADD_HOTPLUG_VAR("UNIQ=\"%s\"", dev->uniq);

	INPUT_ADD_HOTPLUG_BM_VAR("PROP=", dev->propbit, INPUT_PROP_MAX);

	INPUT_ADD_HOTPLUG_BM_VAR("EV=", dev->evbit, EV_MAX);
	if (test_bit(EV_KEY, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("KEY=", dev->keybit, KEY_MAX);
	if (test_bit(EV_REL, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("REL=", dev->relbit, REL_MAX);
	if (test_bit(EV_ABS, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("ABS=", dev->absbit, ABS_MAX);
	if (test_bit(EV_MSC, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("MSC=", dev->mscbit, MSC_MAX);
	if (test_bit(EV_LED, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("LED=", dev->ledbit, LED_MAX);
	if (test_bit(EV_SND, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SND=", dev->sndbit, SND_MAX);
	if (test_bit(EV_FF, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("FF=", dev->ffbit, FF_MAX);
	if (test_bit(EV_SW, dev->evbit))
		INPUT_ADD_HOTPLUG_BM_VAR("SW=", dev->swbit, SW_MAX);

	INPUT_ADD_HOTPLUG_MODALIAS_VAR(dev);

	return 0;
}

#define INPUT_DO_TOGGLE(dev, type, bits, on)				\
	do {								\
		int i;							\
		bool active;						\
									\
		if (!test_bit(EV_##type, dev->evbit))			\
			break;						\
									\
		for_each_set_bit(i, dev->bits##bit, type##_CNT) {	\
			active = test_bit(i, dev->bits);		\
			if (!active && !on)				\
				continue;				\
									\
			dev->event(dev, EV_##type, i, on ? active : 0);	\
		}							\
	} while (0)

static void input_dev_toggle(struct input_dev *dev, bool activate)
{
	if (!dev->event)
		return;

	INPUT_DO_TOGGLE(dev, LED, led, activate);
	INPUT_DO_TOGGLE(dev, SND, snd, activate);

	if (activate && test_bit(EV_REP, dev->evbit)) {
		dev->event(dev, EV_REP, REP_PERIOD, dev->rep[REP_PERIOD]);
		dev->event(dev, EV_REP, REP_DELAY, dev->rep[REP_DELAY]);
	}
}

/**
 * input_reset_device() - reset/restore the state of input device
 * @dev: input device whose state needs to be reset
 *
 * This function tries to reset the state of an opened input device and
 * bring internal state and state of the hardware in sync with each other.
 * We mark all keys as released, restore LED state, repeat rate, etc.
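 *
 * Illustrative sketch (not part of the original file, helper name is
 * hypothetical): a driver might call this from its resume path once the
 * hardware has been re-initialized:
 *
 *	error = mydrv_reinit_hw(mydrv);
 *	if (!error)
 *		input_reset_device(mydrv->input);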
 */
void input_reset_device(struct input_dev *dev)
{
	unsigned long flags;

	mutex_lock(&dev->mutex);
	spin_lock_irqsave(&dev->event_lock, flags);

	input_dev_toggle(dev, true);
	if (input_dev_release_keys(dev))
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

	spin_unlock_irqrestore(&dev->event_lock, flags);
	mutex_unlock(&dev->mutex);
}
EXPORT_SYMBOL(input_reset_device);

static int input_inhibit_device(struct input_dev *dev)
{
	mutex_lock(&dev->mutex);

	if (dev->inhibited)
		goto out;

	if (dev->users) {
		if (dev->close)
			dev->close(dev);
		if (dev->poller)
			input_dev_poller_stop(dev->poller);
	}

	spin_lock_irq(&dev->event_lock);
	input_mt_release_slots(dev);
	input_dev_release_keys(dev);
	input_handle_event(dev, EV_SYN, SYN_REPORT, 1);
	input_dev_toggle(dev, false);
	spin_unlock_irq(&dev->event_lock);

	dev->inhibited = true;

 out:
	mutex_unlock(&dev->mutex);
	return 0;
}

static int input_uninhibit_device(struct input_dev *dev)
{
	int ret = 0;

	mutex_lock(&dev->mutex);

	if (!dev->inhibited)
		goto out;

	if (dev->users) {
		if (dev->open) {
			ret = dev->open(dev);
			if (ret)
				goto out;
		}
		if (dev->poller)
			input_dev_poller_start(dev->poller);
	}

	dev->inhibited = false;
	spin_lock_irq(&dev->event_lock);
	input_dev_toggle(dev, true);
	spin_unlock_irq(&dev->event_lock);

 out:
	mutex_unlock(&dev->mutex);
	return ret;
}

static int input_dev_suspend(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_resume(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Restore state of LEDs and sounds, if any were active. */
	input_dev_toggle(input_dev, true);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_freeze(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/*
	 * Keys that are pressed now are unlikely to be
	 * still pressed when we resume.
	 */
	if (input_dev_release_keys(input_dev))
		input_handle_event(input_dev, EV_SYN, SYN_REPORT, 1);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static int input_dev_poweroff(struct device *dev)
{
	struct input_dev *input_dev = to_input_dev(dev);

	spin_lock_irq(&input_dev->event_lock);

	/* Turn off LEDs and sounds, if any are active. */
	input_dev_toggle(input_dev, false);

	spin_unlock_irq(&input_dev->event_lock);

	return 0;
}

static const struct dev_pm_ops input_dev_pm_ops = {
	.suspend	= input_dev_suspend,
	.resume		= input_dev_resume,
	.freeze		= input_dev_freeze,
	.poweroff	= input_dev_poweroff,
	.restore	= input_dev_resume,
};

static const struct device_type input_dev_type = {
	.groups		= input_dev_attr_groups,
	.release	= input_dev_release,
	.uevent		= input_dev_uevent,
	.pm		= pm_sleep_ptr(&input_dev_pm_ops),
};

static char *input_devnode(const struct device *dev, umode_t *mode)
{
	return kasprintf(GFP_KERNEL, "input/%s", dev_name(dev));
}

struct class input_class = {
	.name		= "input",
	.devnode	= input_devnode,
};
EXPORT_SYMBOL_GPL(input_class);

/**
 * input_allocate_device - allocate memory for new input device
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * NOTE: Use input_free_device() to free devices that have not been
 * registered; input_unregister_device() should be used for already
 * registered devices.
 */
struct input_dev *input_allocate_device(void)
{
	static atomic_t input_no = ATOMIC_INIT(-1);
	struct input_dev *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev) {
		dev->dev.type = &input_dev_type;
		dev->dev.class = &input_class;
		device_initialize(&dev->dev);
		mutex_init(&dev->mutex);
		spin_lock_init(&dev->event_lock);
		timer_setup(&dev->timer, NULL, 0);
		INIT_LIST_HEAD(&dev->h_list);
		INIT_LIST_HEAD(&dev->node);

		dev_set_name(&dev->dev, "input%lu",
			     (unsigned long)atomic_inc_return(&input_no));

		__module_get(THIS_MODULE);
	}

	return dev;
}
EXPORT_SYMBOL(input_allocate_device);

struct input_devres {
	struct input_dev *input;
};

static int devm_input_device_match(struct device *dev, void *res, void *data)
{
	struct input_devres *devres = res;

	return devres->input == data;
}

static void devm_input_device_release(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: dropping reference to %s\n",
		__func__, dev_name(&input->dev));
	input_put_device(input);
}

/**
 * devm_input_allocate_device - allocate managed input device
 * @dev: device owning the input device being created
 *
 * Returns prepared struct input_dev or %NULL.
 *
 * Managed input devices do not need to be explicitly unregistered or
 * freed as it will be done automatically when owner device unbinds from
 * its driver (or binding fails). Once managed input device is allocated,
 * it is ready to be set up and registered in the same fashion as regular
 * input device. There are no special devm_input_device_[un]register()
 * variants, regular ones work with both managed and unmanaged devices,
 * should you need them. In most cases however, managed input device need
 * not be explicitly unregistered or freed.
 *
 * NOTE: the owner device is set up as parent of input device and users
 * should not override it.
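 *
 * Illustrative sketch (not part of the original file, assumes a platform
 * driver with a struct platform_device *pdev; error handling abbreviated):
 *
 *	input = devm_input_allocate_device(&pdev->dev);
 *	if (!input)
 *		return -ENOMEM;
 *
 *	input->name = "example button";
 *	input_set_capability(input, EV_KEY, KEY_POWER);
 *
 *	return input_register_device(input);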
 */
struct input_dev *devm_input_allocate_device(struct device *dev)
{
	struct input_dev *input;
	struct input_devres *devres;

	devres = devres_alloc(devm_input_device_release,
			      sizeof(*devres), GFP_KERNEL);
	if (!devres)
		return NULL;

	input = input_allocate_device();
	if (!input) {
		devres_free(devres);
		return NULL;
	}

	input->dev.parent = dev;
	input->devres_managed = true;

	devres->input = input;
	devres_add(dev, devres);

	return input;
}
EXPORT_SYMBOL(devm_input_allocate_device);

/**
 * input_free_device - free memory occupied by input_dev structure
 * @dev: input device to free
 *
 * This function should only be used if input_register_device()
 * was not called yet or if it failed. Once device was registered
 * use input_unregister_device() and memory will be freed once last
 * reference to the device is dropped.
 *
 * Device should be allocated by input_allocate_device().
 *
 * NOTE: If there are references to the input device then memory
 * will not be freed until last reference is dropped.
 */
void input_free_device(struct input_dev *dev)
{
	if (dev) {
		if (dev->devres_managed)
			WARN_ON(devres_destroy(dev->dev.parent,
						devm_input_device_release,
						devm_input_device_match,
						dev));
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_free_device);

/**
 * input_set_timestamp - set timestamp for input events
 * @dev: input device to set timestamp for
 * @timestamp: the time at which the event has occurred
 *	in CLOCK_MONOTONIC
 *
 * This function is intended to provide to the input system a more
 * accurate time of when an event actually occurred. The driver should
 * call this function as soon as a timestamp is acquired ensuring
 * clock conversions in input_set_timestamp are done correctly.
 *
 * The system entering suspend state between timestamp acquisition and
 * calling input_set_timestamp can result in inaccurate conversions.
 */
void input_set_timestamp(struct input_dev *dev, ktime_t timestamp)
{
	dev->timestamp[INPUT_CLK_MONO] = timestamp;
	dev->timestamp[INPUT_CLK_REAL] = ktime_mono_to_real(timestamp);
	dev->timestamp[INPUT_CLK_BOOT] = ktime_mono_to_any(timestamp,
							   TK_OFFS_BOOT);
}
EXPORT_SYMBOL(input_set_timestamp);

/**
 * input_get_timestamp - get timestamp for input events
 * @dev: input device to get timestamp from
 *
 * A valid timestamp is a timestamp of non-zero value.
 */
ktime_t *input_get_timestamp(struct input_dev *dev)
{
	const ktime_t invalid_timestamp = ktime_set(0, 0);

	if (!ktime_compare(dev->timestamp[INPUT_CLK_MONO], invalid_timestamp))
		input_set_timestamp(dev, ktime_get());

	return dev->timestamp;
}
EXPORT_SYMBOL(input_get_timestamp);

/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting up corresponding bit in appropriate capability
 * bitmap the function also adjusts dev->evbit.
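 *
 * Illustrative sketch (not part of the original file): declaring a device
 * that reports relative mouse motion and a left button:
 *
 *	input_set_capability(dev, EV_REL, REL_X);
 *	input_set_capability(dev, EV_REL, REL_Y);
 *	input_set_capability(dev, EV_KEY, BTN_LEFT);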
/**
 * input_set_capability - mark device as capable of a certain event
 * @dev: device that is capable of emitting or accepting event
 * @type: type of the event (EV_KEY, EV_REL, etc...)
 * @code: event code
 *
 * In addition to setting the corresponding bit in the appropriate
 * capability bitmap, the function also adjusts dev->evbit.
 */
void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int code)
{
	if (type < EV_CNT && input_max_code[type] &&
	    code > input_max_code[type]) {
		pr_err("%s: invalid code %u for type %u\n", __func__, code,
		       type);
		dump_stack();
		return;
	}

	switch (type) {
	case EV_KEY:
		__set_bit(code, dev->keybit);
		break;

	case EV_REL:
		__set_bit(code, dev->relbit);
		break;

	case EV_ABS:
		input_alloc_absinfo(dev);
		__set_bit(code, dev->absbit);
		break;

	case EV_MSC:
		__set_bit(code, dev->mscbit);
		break;

	case EV_SW:
		__set_bit(code, dev->swbit);
		break;

	case EV_LED:
		__set_bit(code, dev->ledbit);
		break;

	case EV_SND:
		__set_bit(code, dev->sndbit);
		break;

	case EV_FF:
		__set_bit(code, dev->ffbit);
		break;

	case EV_PWR:
		/* do nothing */
		break;

	default:
		pr_err("%s: unknown type %u (code %u)\n", __func__, type, code);
		dump_stack();
		return;
	}

	__set_bit(type, dev->evbit);
}
EXPORT_SYMBOL(input_set_capability);
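
/*
 * Usage sketch (not part of the core, names are illustrative): declaring
 * capabilities for a hypothetical device with one button, one LED and an
 * absolute X axis. input_set_capability() sets both the per-type bitmap
 * and dev->evbit; for absolute axes the reported range is then described
 * with input_set_abs_params().
 *
 *	input_set_capability(input, EV_KEY, BTN_0);
 *	input_set_capability(input, EV_LED, LED_MISC);
 *	input_set_capability(input, EV_ABS, ABS_X);
 *	input_set_abs_params(input, ABS_X, 0, 255, 4, 8);
 */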
static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
{
	int mt_slots;
	int i;
	unsigned int events;

	if (dev->mt) {
		mt_slots = dev->mt->num_slots;
	} else if (test_bit(ABS_MT_TRACKING_ID, dev->absbit)) {
		mt_slots = dev->absinfo[ABS_MT_TRACKING_ID].maximum -
			   dev->absinfo[ABS_MT_TRACKING_ID].minimum + 1;
		mt_slots = clamp(mt_slots, 2, 32);
	} else if (test_bit(ABS_MT_POSITION_X, dev->absbit)) {
		mt_slots = 2;
	} else {
		mt_slots = 0;
	}

	events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */

	if (test_bit(EV_ABS, dev->evbit))
		for_each_set_bit(i, dev->absbit, ABS_CNT)
			events += input_is_mt_axis(i) ? mt_slots : 1;

	if (test_bit(EV_REL, dev->evbit))
		events += bitmap_weight(dev->relbit, REL_CNT);

	/* Make room for KEY and MSC events */
	events += 7;

	return events;
}

#define INPUT_CLEANSE_BITMASK(dev, type, bits)				\
	do {								\
		if (!test_bit(EV_##type, dev->evbit))			\
			memset(dev->bits##bit, 0,			\
			       sizeof(dev->bits##bit));			\
	} while (0)

static void input_cleanse_bitmasks(struct input_dev *dev)
{
	INPUT_CLEANSE_BITMASK(dev, KEY, key);
	INPUT_CLEANSE_BITMASK(dev, REL, rel);
	INPUT_CLEANSE_BITMASK(dev, ABS, abs);
	INPUT_CLEANSE_BITMASK(dev, MSC, msc);
	INPUT_CLEANSE_BITMASK(dev, LED, led);
	INPUT_CLEANSE_BITMASK(dev, SND, snd);
	INPUT_CLEANSE_BITMASK(dev, FF, ff);
	INPUT_CLEANSE_BITMASK(dev, SW, sw);
}

static void __input_unregister_device(struct input_dev *dev)
{
	struct input_handle *handle, *next;

	input_disconnect_device(dev);

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &dev->h_list, d_node)
		handle->handler->disconnect(handle);
	WARN_ON(!list_empty(&dev->h_list));

	del_timer_sync(&dev->timer);
	list_del_init(&dev->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	device_del(&dev->dev);
}

static void devm_input_device_unregister(struct device *dev, void *res)
{
	struct input_devres *devres = res;
	struct input_dev *input = devres->input;

	dev_dbg(dev, "%s: unregistering device %s\n",
		__func__, dev_name(&input->dev));
	__input_unregister_device(input);
}

/*
 * Generate software autorepeat event. Note that we take
 * dev->event_lock here to avoid racing with input_event()
 * which may cause keys to get "stuck".
 */
static void input_repeat_key(struct timer_list *t)
{
	struct input_dev *dev = from_timer(dev, t, timer);
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);

	if (!dev->inhibited &&
	    test_bit(dev->repeat_key, dev->key) &&
	    is_event_supported(dev->repeat_key, dev->keybit, KEY_MAX)) {

		input_set_timestamp(dev, ktime_get());
		input_handle_event(dev, EV_KEY, dev->repeat_key, 2);
		input_handle_event(dev, EV_SYN, SYN_REPORT, 1);

		if (dev->rep[REP_PERIOD])
			mod_timer(&dev->timer, jiffies +
					msecs_to_jiffies(dev->rep[REP_PERIOD]));
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}

/**
 * input_enable_softrepeat - enable software autorepeat
 * @dev: input device
 * @delay: repeat delay
 * @period: repeat period
 *
 * Enable software autorepeat on the input device.
 */
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
	dev->timer.function = input_repeat_key;
	dev->rep[REP_DELAY] = delay;
	dev->rep[REP_PERIOD] = period;
}
EXPORT_SYMBOL(input_enable_softrepeat);

bool input_device_enabled(struct input_dev *dev)
{
	lockdep_assert_held(&dev->mutex);

	return !dev->inhibited && dev->users > 0;
}
EXPORT_SYMBOL_GPL(input_device_enabled);
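
/*
 * Usage sketch (not part of the core, names are illustrative): a driver
 * requesting software autorepeat with its own timing before registering
 * the device. Because both rep values are already non-zero when
 * input_register_device() runs, the core keeps this configuration
 * instead of installing its default (250 ms delay, 33 ms period).
 *
 *	__set_bit(EV_REP, input->evbit);
 *	input_enable_softrepeat(input, 500, 125);
 *
 *	error = input_register_device(input);
 *	if (error)
 *		return error;
 */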
/**
 * input_register_device - register device with input core
 * @dev: device to be registered
 *
 * This function registers device with input core. The device must be
 * allocated with input_allocate_device() and all its capabilities
 * set up before registering.
 * If this function fails the device must be freed with input_free_device().
 * Once device has been successfully registered it can be unregistered
 * with input_unregister_device(); input_free_device() should not be
 * called in this case.
 *
 * Note that this function is also used to register managed input devices
 * (ones allocated with devm_input_allocate_device()). Such managed input
 * devices need not be explicitly unregistered or freed; their tear down
 * is controlled by the devres infrastructure. It is also worth noting
 * that tear down of managed input devices is internally a 2-step process:
 * a registered managed input device is first unregistered, but stays in
 * memory and can still handle input_event() calls (although events will
 * not be delivered anywhere). The freeing of the managed input device
 * happens later, when the devres stack is unwound to the point where the
 * device allocation was made.
 */
int input_register_device(struct input_dev *dev)
{
	struct input_devres *devres = NULL;
	struct input_handler *handler;
	unsigned int packet_size;
	const char *path;
	int error;

	if (test_bit(EV_ABS, dev->evbit) && !dev->absinfo) {
		dev_err(&dev->dev,
			"Absolute device without dev->absinfo, refusing to register\n");
		return -EINVAL;
	}

	if (dev->devres_managed) {
		devres = devres_alloc(devm_input_device_unregister,
				      sizeof(*devres), GFP_KERNEL);
		if (!devres)
			return -ENOMEM;

		devres->input = dev;
	}

	/* Every input device generates EV_SYN/SYN_REPORT events. */
	__set_bit(EV_SYN, dev->evbit);

	/* KEY_RESERVED is not supposed to be transmitted to userspace. */
	__clear_bit(KEY_RESERVED, dev->keybit);

	/* Make sure that bitmasks not mentioned in dev->evbit are clean. */
	input_cleanse_bitmasks(dev);

	packet_size = input_estimate_events_per_packet(dev);
	if (dev->hint_events_per_packet < packet_size)
		dev->hint_events_per_packet = packet_size;

	dev->max_vals = dev->hint_events_per_packet + 2;
	dev->vals = kcalloc(dev->max_vals, sizeof(*dev->vals), GFP_KERNEL);
	if (!dev->vals) {
		error = -ENOMEM;
		goto err_devres_free;
	}

	/*
	 * If delay and period are pre-set by the driver, then autorepeating
	 * is handled by the driver itself and we don't do it in input.c.
	 */
	if (!dev->rep[REP_DELAY] && !dev->rep[REP_PERIOD])
		input_enable_softrepeat(dev, 250, 33);

	if (!dev->getkeycode)
		dev->getkeycode = input_default_getkeycode;

	if (!dev->setkeycode)
		dev->setkeycode = input_default_setkeycode;

	if (dev->poller)
		input_dev_poller_finalize(dev->poller);

	error = device_add(&dev->dev);
	if (error)
		goto err_free_vals;

	path = kobject_get_path(&dev->dev.kobj, GFP_KERNEL);
	pr_info("%s as %s\n",
		dev->name ? dev->name : "Unspecified device",
		path ? path : "N/A");
	kfree(path);

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		goto err_device_del;

	list_add_tail(&dev->node, &input_dev_list);

	list_for_each_entry(handler, &input_handler_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);

	if (dev->devres_managed) {
		dev_dbg(dev->dev.parent, "%s: registering %s with devres.\n",
			__func__, dev_name(&dev->dev));
		devres_add(dev->dev.parent, devres);
	}
	return 0;

err_device_del:
	device_del(&dev->dev);
err_free_vals:
	kfree(dev->vals);
	dev->vals = NULL;
err_devres_free:
	devres_free(devres);
	return error;
}
EXPORT_SYMBOL(input_register_device);
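
/*
 * Usage sketch (not part of the core, names are illustrative): a
 * hypothetical platform driver probe() registering a devres-managed
 * device. On failure of probe() the managed allocation is released
 * automatically; nothing has to be unwound by the driver.
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct input_dev *input;
 *
 *		input = devm_input_allocate_device(&pdev->dev);
 *		if (!input)
 *			return -ENOMEM;
 *
 *		input->name = "Example Keypad";
 *		input->id.bustype = BUS_HOST;
 *		input_set_capability(input, EV_KEY, KEY_ENTER);
 *
 *		return input_register_device(input);
 *	}
 */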
/**
 * input_unregister_device - unregister previously registered device
 * @dev: device to be unregistered
 *
 * This function unregisters an input device. Once the device has been
 * unregistered the caller should not try to access it, as it may be
 * freed at any moment.
 */
void input_unregister_device(struct input_dev *dev)
{
	if (dev->devres_managed) {
		WARN_ON(devres_destroy(dev->dev.parent,
				       devm_input_device_unregister,
				       devm_input_device_match,
				       dev));
		__input_unregister_device(dev);
		/*
		 * We do not do input_put_device() here because it will be
		 * done when the second devres, devm_input_device_release(),
		 * fires.
		 */
	} else {
		__input_unregister_device(dev);
		input_put_device(dev);
	}
}
EXPORT_SYMBOL(input_unregister_device);

/**
 * input_register_handler - register a new input handler
 * @handler: handler to be registered
 *
 * This function registers a new input handler (interface) for input
 * devices in the system and attaches it to all input devices that
 * are compatible with the handler.
 */
int input_register_handler(struct input_handler *handler)
{
	struct input_dev *dev;
	int error;

	error = mutex_lock_interruptible(&input_mutex);
	if (error)
		return error;

	INIT_LIST_HEAD(&handler->h_list);

	list_add_tail(&handler->node, &input_handler_list);

	list_for_each_entry(dev, &input_dev_list, node)
		input_attach_handler(dev, handler);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
	return 0;
}
EXPORT_SYMBOL(input_register_handler);
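
/*
 * Usage sketch (not part of the core, names are illustrative): the
 * skeleton of a hypothetical handler module that wants to see events
 * from every device. example_event() is the handler's event callback;
 * a connect()/disconnect() pair is sketched further below, after
 * input_register_handle()/input_unregister_handle().
 *
 *	static const struct input_device_id example_ids[] = {
 *		{ .driver_info = 1 },	matches all devices
 *		{ },			terminating empty entry
 *	};
 *
 *	static struct input_handler example_handler = {
 *		.event		= example_event,
 *		.connect	= example_connect,
 *		.disconnect	= example_disconnect,
 *		.name		= "example",
 *		.id_table	= example_ids,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return input_register_handler(&example_handler);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		input_unregister_handler(&example_handler);
 *	}
 */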
/**
 * input_unregister_handler - unregisters an input handler
 * @handler: handler to be unregistered
 *
 * This function disconnects a handler from its input devices and
 * removes it from lists of known handlers.
 */
void input_unregister_handler(struct input_handler *handler)
{
	struct input_handle *handle, *next;

	mutex_lock(&input_mutex);

	list_for_each_entry_safe(handle, next, &handler->h_list, h_node)
		handler->disconnect(handle);
	WARN_ON(!list_empty(&handler->h_list));

	list_del_init(&handler->node);

	input_wakeup_procfs_readers();

	mutex_unlock(&input_mutex);
}
EXPORT_SYMBOL(input_unregister_handler);

/**
 * input_handler_for_each_handle - handle iterator
 * @handler: input handler to iterate
 * @data: data for the callback
 * @fn: function to be called for each handle
 *
 * Iterate over @handler's list of handles, call @fn for each, passing
 * it @data, and stop when @fn returns a non-zero value. The function
 * uses RCU to traverse the list and therefore may be used in atomic
 * contexts. The @fn callback is invoked from RCU critical section and
 * thus must not sleep.
 */
int input_handler_for_each_handle(struct input_handler *handler, void *data,
				  int (*fn)(struct input_handle *, void *))
{
	struct input_handle *handle;
	int retval = 0;

	rcu_read_lock();

	list_for_each_entry_rcu(handle, &handler->h_list, h_node) {
		retval = fn(handle, data);
		if (retval)
			break;
	}

	rcu_read_unlock();

	return retval;
}
EXPORT_SYMBOL(input_handler_for_each_handle);

/**
 * input_register_handle - register a new input handle
 * @handle: handle to register
 *
 * This function puts a new input handle onto device's
 * and handler's lists so that events can flow through
 * it once it is opened using input_open_device().
 *
 * This function is supposed to be called from handler's
 * connect() method.
 */
int input_register_handle(struct input_handle *handle)
{
	struct input_handler *handler = handle->handler;
	struct input_dev *dev = handle->dev;
	int error;

	/*
	 * We take dev->mutex here to prevent race with
	 * input_release_device().
	 */
	error = mutex_lock_interruptible(&dev->mutex);
	if (error)
		return error;

	/*
	 * Filters go to the head of the list, normal handlers
	 * to the tail.
	 */
	if (handler->filter)
		list_add_rcu(&handle->d_node, &dev->h_list);
	else
		list_add_tail_rcu(&handle->d_node, &dev->h_list);

	mutex_unlock(&dev->mutex);

	/*
	 * Since we are supposed to be called from ->connect()
	 * which is mutually exclusive with ->disconnect()
	 * we can't be racing with input_unregister_handle()
	 * and so separate lock is not needed here.
	 */
	list_add_tail_rcu(&handle->h_node, &handler->h_list);

	if (handler->start)
		handler->start(handle);

	return 0;
}
EXPORT_SYMBOL(input_register_handle);

/**
 * input_unregister_handle - unregister an input handle
 * @handle: handle to unregister
 *
 * This function removes input handle from device's
 * and handler's lists.
 *
 * This function is supposed to be called from handler's
 * disconnect() method.
 */
void input_unregister_handle(struct input_handle *handle)
{
	struct input_dev *dev = handle->dev;

	list_del_rcu(&handle->h_node);

	/*
	 * Take dev->mutex to prevent race with input_release_device().
	 */
	mutex_lock(&dev->mutex);
	list_del_rcu(&handle->d_node);
	mutex_unlock(&dev->mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL(input_unregister_handle);
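
/*
 * Usage sketch (not part of the core, names are illustrative): a
 * hypothetical handler's connect() and disconnect() methods built on
 * input_register_handle()/input_unregister_handle(). Note the ordering:
 * the handle is registered before it is opened, and closed before it is
 * unregistered.
 *
 *	static int example_connect(struct input_handler *handler,
 *				   struct input_dev *dev,
 *				   const struct input_device_id *id)
 *	{
 *		struct input_handle *handle;
 *		int error;
 *
 *		handle = kzalloc(sizeof(*handle), GFP_KERNEL);
 *		if (!handle)
 *			return -ENOMEM;
 *
 *		handle->dev = dev;
 *		handle->handler = handler;
 *		handle->name = "example";
 *
 *		error = input_register_handle(handle);
 *		if (error)
 *			goto err_free_handle;
 *
 *		error = input_open_device(handle);
 *		if (error)
 *			goto err_unregister_handle;
 *
 *		return 0;
 *
 *	err_unregister_handle:
 *		input_unregister_handle(handle);
 *	err_free_handle:
 *		kfree(handle);
 *		return error;
 *	}
 *
 *	static void example_disconnect(struct input_handle *handle)
 *	{
 *		input_close_device(handle);
 *		input_unregister_handle(handle);
 *		kfree(handle);
 *	}
 */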
/**
 * input_get_new_minor - allocates a new input minor number
 * @legacy_base: beginning of the legacy range to be searched
 * @legacy_num: size of legacy range
 * @allow_dynamic: whether we can also take ID from the dynamic range
 *
 * This function allocates a new device minor from the input major
 * namespace. Callers can request a legacy minor by specifying
 * @legacy_base and @legacy_num and indicate whether an ID may be
 * allocated from the dynamic range if there are no free IDs in the
 * legacy range.
 */
int input_get_new_minor(int legacy_base, unsigned int legacy_num,
			bool allow_dynamic)
{
	/*
	 * This function should be called from input handler's ->connect()
	 * methods, which are serialized with input_mutex, so no additional
	 * locking is needed here.
	 */
	if (legacy_base >= 0) {
		int minor = ida_simple_get(&input_ida,
					   legacy_base,
					   legacy_base + legacy_num,
					   GFP_KERNEL);
		if (minor >= 0 || !allow_dynamic)
			return minor;
	}

	return ida_simple_get(&input_ida,
			      INPUT_FIRST_DYNAMIC_DEV, INPUT_MAX_CHAR_DEVICES,
			      GFP_KERNEL);
}
EXPORT_SYMBOL(input_get_new_minor);

/**
 * input_free_minor - release previously allocated minor
 * @minor: minor to be released
 *
 * This function releases previously allocated input minor so that it can be
 * reused later.
 */
void input_free_minor(unsigned int minor)
{
	ida_simple_remove(&input_ida, minor);
}
EXPORT_SYMBOL(input_free_minor);

static int __init input_init(void)
{
	int err;

	err = class_register(&input_class);
	if (err) {
		pr_err("unable to register input_dev class\n");
		return err;
	}

	err = input_proc_init();
	if (err)
		goto fail1;

	err = register_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				     INPUT_MAX_CHAR_DEVICES, "input");
	if (err) {
		pr_err("unable to register char major %d\n", INPUT_MAJOR);
		goto fail2;
	}

	return 0;

 fail2:	input_proc_exit();
 fail1:	class_unregister(&input_class);
	return err;
}

static void __exit input_exit(void)
{
	input_proc_exit();
	unregister_chrdev_region(MKDEV(INPUT_MAJOR, 0),
				 INPUT_MAX_CHAR_DEVICES);
	class_unregister(&input_class);
}

subsys_initcall(input_init);
module_exit(input_exit);
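
/*
 * Usage sketch (not part of the core, names are illustrative): minor
 * management in a hypothetical character-device handler's connect()
 * path. EXAMPLE_MINOR_BASE and example_setup_cdev() are assumptions
 * made for the sake of the sketch.
 *
 *	int minor, error;
 *
 *	minor = input_get_new_minor(EXAMPLE_MINOR_BASE, 32, true);
 *	if (minor < 0)
 *		return minor;
 *
 *	error = example_setup_cdev(handle, minor);
 *	if (error) {
 *		input_free_minor(minor);
 *		return error;
 *	}
 */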