// SPDX-License-Identifier: GPL-2.0-only
/*
 * Event char devices, giving access to raw input device events.
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define EVDEV_MINOR_BASE	64
#define EVDEV_MINORS		32
#define EVDEV_MIN_BUFFER_SIZE	64U
#define EVDEV_BUF_PACKETS	8

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include "input-compat.h"

enum evdev_clock_type {
	EV_CLK_REAL = 0,
	EV_CLK_MONO,
	EV_CLK_BOOT,
	EV_CLK_MAX
};

struct evdev {
	int open;
	struct input_handle handle;
	wait_queue_head_t wait;
	struct evdev_client __rcu *grab;
	struct list_head client_list;
	spinlock_t client_lock; /* protects client_list */
	struct mutex mutex;
	struct device dev;
	struct cdev cdev;
	bool exist;
};

struct evdev_client {
	unsigned int head;
	unsigned int tail;
	unsigned int packet_head; /* [future] position of the first element of next packet */
	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
	struct fasync_struct *fasync;
	struct evdev *evdev;
	struct list_head node;
	unsigned int clk_type;
	bool revoked;
	unsigned long *evmasks[EV_CNT];
	unsigned int bufsize;
	struct input_event buffer[];
};

static size_t evdev_get_mask_cnt(unsigned int type)
{
	static const size_t counts[EV_CNT] = {
		/* EV_SYN==0 is EV_CNT, _not_ SYN_CNT, see EVIOCGBIT */
		[EV_SYN]	= EV_CNT,
		[EV_KEY]	= KEY_CNT,
		[EV_REL]	= REL_CNT,
		[EV_ABS]	= ABS_CNT,
		[EV_MSC]	= MSC_CNT,
		[EV_SW]		= SW_CNT,
		[EV_LED]	= LED_CNT,
		[EV_SND]	= SND_CNT,
		[EV_FF]		= FF_CNT,
	};

	return (type < EV_CNT) ? counts[type] : 0;
}
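
/*
 * Per-client event masks (set via EVIOCSMASK, tested in __evdev_is_filtered()
 * below): evmasks[0] holds the mask of EV_* types the client wants, while
 * evmasks[type] holds the per-code mask for that type.  A missing (NULL) mask
 * means "no filtering" at that level.
 */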
/* requires the buffer lock to be held */
static bool __evdev_is_filtered(struct evdev_client *client,
				unsigned int type,
				unsigned int code)
{
	unsigned long *mask;
	size_t cnt;

	/* EV_SYN and unknown codes are never filtered */
	if (type == EV_SYN || type >= EV_CNT)
		return false;

	/* first test whether the type is filtered */
	mask = client->evmasks[0];
	if (mask && !test_bit(type, mask))
		return true;

	/* unknown values are never filtered */
	cnt = evdev_get_mask_cnt(type);
	if (!cnt || code >= cnt)
		return false;

	mask = client->evmasks[type];
	return mask && !test_bit(code, mask);
}

/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
	unsigned int i, head, num;
	unsigned int mask = client->bufsize - 1;
	bool is_report;
	struct input_event *ev;

	BUG_ON(type == EV_SYN);

	head = client->tail;
	client->packet_head = client->tail;

	/* init to 1 so a leading SYN_REPORT will not be dropped */
	num = 1;

	for (i = client->tail; i != client->head; i = (i + 1) & mask) {
		ev = &client->buffer[i];
		is_report = ev->type == EV_SYN && ev->code == SYN_REPORT;

		if (ev->type == type) {
			/* drop matched entry */
			continue;
		} else if (is_report && !num) {
			/* drop empty SYN_REPORT groups */
			continue;
		} else if (head != i) {
			/* move entry to fill the gap */
			client->buffer[head] = *ev;
		}

		num++;
		head = (head + 1) & mask;

		if (is_report) {
			num = 0;
			client->packet_head = head;
		}
	}

	client->head = head;
}

static void __evdev_queue_syn_dropped(struct evdev_client *client)
{
	struct input_event ev;
	ktime_t time;
	struct timespec64 ts;

	time = client->clk_type == EV_CLK_REAL ?
			ktime_get_real() :
			client->clk_type == EV_CLK_MONO ?
				ktime_get() :
				ktime_get_boottime();

	ts = ktime_to_timespec64(time);
	ev.input_event_sec = ts.tv_sec;
	ev.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;
	ev.type = EV_SYN;
	ev.code = SYN_DROPPED;
	ev.value = 0;

	client->buffer[client->head++] = ev;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/* drop queue but keep our SYN_DROPPED event */
		client->tail = (client->head - 1) & (client->bufsize - 1);
		client->packet_head = client->tail;
	}
}

static void evdev_queue_syn_dropped(struct evdev_client *client)
{
	unsigned long flags;

	spin_lock_irqsave(&client->buffer_lock, flags);
	__evdev_queue_syn_dropped(client);
	spin_unlock_irqrestore(&client->buffer_lock, flags);
}
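
/*
 * Illustrative userspace sketch (not part of this driver): selecting the
 * clock used for event timestamps with EVIOCSCLOCKID, handled by
 * evdev_set_clk_type() below.  Error handling is trimmed and "fd" is assumed
 * to be an open /dev/input/eventN file descriptor.
 *
 *	int clkid = CLOCK_MONOTONIC;
 *
 *	if (ioctl(fd, EVIOCSCLOCKID, &clkid))
 *		perror("EVIOCSCLOCKID");
 *	// subsequent struct input_event timestamps use CLOCK_MONOTONIC
 */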
static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
{
	unsigned long flags;
	unsigned int clk_type;

	switch (clkid) {

	case CLOCK_REALTIME:
		clk_type = EV_CLK_REAL;
		break;
	case CLOCK_MONOTONIC:
		clk_type = EV_CLK_MONO;
		break;
	case CLOCK_BOOTTIME:
		clk_type = EV_CLK_BOOT;
		break;
	default:
		return -EINVAL;
	}

	if (client->clk_type != clk_type) {
		client->clk_type = clk_type;

		/*
		 * Flush pending events and queue SYN_DROPPED event,
		 * but only if the queue is not empty.
		 */
		spin_lock_irqsave(&client->buffer_lock, flags);

		if (client->head != client->tail) {
			client->packet_head = client->head = client->tail;
			__evdev_queue_syn_dropped(client);
		}

		spin_unlock_irqrestore(&client->buffer_lock, flags);
	}

	return 0;
}

static void __pass_event(struct evdev_client *client,
			 const struct input_event *event)
{
	client->buffer[client->head++] = *event;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/*
		 * This effectively "drops" all unconsumed events, leaving
		 * EV_SYN/SYN_DROPPED plus the newest event in the queue.
		 */
		client->tail = (client->head - 2) & (client->bufsize - 1);

		client->buffer[client->tail].input_event_sec =
						event->input_event_sec;
		client->buffer[client->tail].input_event_usec =
						event->input_event_usec;
		client->buffer[client->tail].type = EV_SYN;
		client->buffer[client->tail].code = SYN_DROPPED;
		client->buffer[client->tail].value = 0;

		client->packet_head = client->tail;
	}

	if (event->type == EV_SYN && event->code == SYN_REPORT) {
		client->packet_head = client->head;
		kill_fasync(&client->fasync, SIGIO, POLL_IN);
	}
}

static void evdev_pass_values(struct evdev_client *client,
			      const struct input_value *vals, unsigned int count,
			      ktime_t *ev_time)
{
	struct evdev *evdev = client->evdev;
	const struct input_value *v;
	struct input_event event;
	struct timespec64 ts;
	bool wakeup = false;

	if (client->revoked)
		return;

	ts = ktime_to_timespec64(ev_time[client->clk_type]);
	event.input_event_sec = ts.tv_sec;
	event.input_event_usec = ts.tv_nsec / NSEC_PER_USEC;

	/* Interrupts are disabled, just acquire the lock. */
	spin_lock(&client->buffer_lock);

	for (v = vals; v != vals + count; v++) {
		if (__evdev_is_filtered(client, v->type, v->code))
			continue;

		if (v->type == EV_SYN && v->code == SYN_REPORT) {
			/* drop empty SYN_REPORT */
			if (client->packet_head == client->head)
				continue;

			wakeup = true;
		}

		event.type = v->type;
		event.code = v->code;
		event.value = v->value;
		__pass_event(client, &event);
	}

	spin_unlock(&client->buffer_lock);

	if (wakeup)
		wake_up_interruptible(&evdev->wait);
}
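
/*
 * Event timestamps are sampled once per batch in all three supported clock
 * bases (see evdev_events() below); evdev_pass_values() then uses the base
 * matching the client's EVIOCSCLOCKID selection.
 */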
/*
 * Pass incoming events to all connected clients.
 */
static void evdev_events(struct input_handle *handle,
			 const struct input_value *vals, unsigned int count)
{
	struct evdev *evdev = handle->private;
	struct evdev_client *client;
	ktime_t ev_time[EV_CLK_MAX];

	ev_time[EV_CLK_MONO] = ktime_get();
	ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
	ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
						 TK_OFFS_BOOT);

	rcu_read_lock();

	client = rcu_dereference(evdev->grab);

	if (client)
		evdev_pass_values(client, vals, count, ev_time);
	else
		list_for_each_entry_rcu(client, &evdev->client_list, node)
			evdev_pass_values(client, vals, count, ev_time);

	rcu_read_unlock();
}

/*
 * Pass incoming event to all connected clients.
 */
static void evdev_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_value vals[] = { { type, code, value } };

	evdev_events(handle, vals, 1);
}

static int evdev_fasync(int fd, struct file *file, int on)
{
	struct evdev_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static int evdev_flush(struct file *file, fl_owner_t id)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;

	mutex_lock(&evdev->mutex);

	if (evdev->exist && !client->revoked)
		input_flush_device(&evdev->handle, file);

	mutex_unlock(&evdev->mutex);
	return 0;
}

static void evdev_free(struct device *dev)
{
	struct evdev *evdev = container_of(dev, struct evdev, dev);

	input_put_device(evdev->handle.dev);
	kfree(evdev);
}

/*
 * Grabs an event device (along with underlying input device).
 * This function is called with evdev->mutex taken.
 */
static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
{
	int error;

	if (evdev->grab)
		return -EBUSY;

	error = input_grab_device(&evdev->handle);
	if (error)
		return error;

	rcu_assign_pointer(evdev->grab, client);

	return 0;
}

static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
	struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
					lockdep_is_held(&evdev->mutex));

	if (grab != client)
		return -EINVAL;

	rcu_assign_pointer(evdev->grab, NULL);
	synchronize_rcu();
	input_release_device(&evdev->handle);

	return 0;
}

static void evdev_attach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_add_tail_rcu(&client->node, &evdev->client_list);
	spin_unlock(&evdev->client_lock);
}

static void evdev_detach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_del_rcu(&client->node);
	spin_unlock(&evdev->client_lock);
	synchronize_rcu();
}
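
/*
 * evdev->open counts clients that have completed evdev_open(): the underlying
 * input device is opened when the count goes from 0 to 1 and closed again
 * when the last client goes away.
 */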
static int evdev_open_device(struct evdev *evdev)
{
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist)
		retval = -ENODEV;
	else if (!evdev->open++) {
		retval = input_open_device(&evdev->handle);
		if (retval)
			evdev->open--;
	}

	mutex_unlock(&evdev->mutex);
	return retval;
}

static void evdev_close_device(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);

	if (evdev->exist && !--evdev->open)
		input_close_device(&evdev->handle);

	mutex_unlock(&evdev->mutex);
}

/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.
 */
static void evdev_hangup(struct evdev *evdev)
{
	struct evdev_client *client;

	spin_lock(&evdev->client_lock);
	list_for_each_entry(client, &evdev->client_list, node)
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	spin_unlock(&evdev->client_lock);

	wake_up_interruptible(&evdev->wait);
}

static int evdev_release(struct inode *inode, struct file *file)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	unsigned int i;

	mutex_lock(&evdev->mutex);
	evdev_ungrab(evdev, client);
	mutex_unlock(&evdev->mutex);

	evdev_detach_client(evdev, client);

	for (i = 0; i < EV_CNT; ++i)
		bitmap_free(client->evmasks[i]);

	kvfree(client);

	evdev_close_device(evdev);

	return 0;
}

static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
{
	unsigned int n_events =
		max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS,
		    EVDEV_MIN_BUFFER_SIZE);

	return roundup_pow_of_two(n_events);
}

static int evdev_open(struct inode *inode, struct file *file)
{
	struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
	unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
	struct evdev_client *client;
	int error;

	client = kzalloc(struct_size(client, buffer, bufsize),
			 GFP_KERNEL | __GFP_NOWARN);
	if (!client)
		client = vzalloc(struct_size(client, buffer, bufsize));
	if (!client)
		return -ENOMEM;

	client->bufsize = bufsize;
	spin_lock_init(&client->buffer_lock);
	client->evdev = evdev;
	evdev_attach_client(evdev, client);

	error = evdev_open_device(evdev);
	if (error)
		goto err_free_client;

	file->private_data = client;
	stream_open(inode, file);

	return 0;

err_free_client:
	evdev_detach_client(evdev, client);
	kvfree(client);
	return error;
}

static ssize_t evdev_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	int retval = 0;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	while (retval + input_event_size() <= count) {

		if (input_event_from_user(buffer + retval, &event)) {
			retval = -EFAULT;
			goto out;
		}
		retval += input_event_size();

		input_inject_event(&evdev->handle,
				   event.type, event.code, event.value);
		cond_resched();
	}

out:
	mutex_unlock(&evdev->mutex);
	return retval;
}

static int evdev_fetch_next_event(struct evdev_client *client,
				  struct input_event *event)
{
	int have_event;

	spin_lock_irq(&client->buffer_lock);

	have_event = client->packet_head != client->tail;
	if (have_event) {
		*event = client->buffer[client->tail++];
		client->tail &= client->bufsize - 1;
	}

	spin_unlock_irq(&client->buffer_lock);

	return have_event;
}
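
/*
 * Illustrative userspace sketch (not part of this driver): a blocking read
 * loop consuming whole struct input_event records from an event node, as
 * served by evdev_read() below.  Error handling is trimmed and "fd" is
 * assumed to be an open /dev/input/eventN file descriptor.
 *
 *	struct input_event ev;
 *
 *	while (read(fd, &ev, sizeof(ev)) == sizeof(ev)) {
 *		if (ev.type == EV_SYN && ev.code == SYN_REPORT)
 *			;	// one complete packet has been delivered
 *	}
 */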
static ssize_t evdev_read(struct file *file, char __user *buffer,
			  size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	size_t read = 0;
	int error;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	for (;;) {
		if (!evdev->exist || client->revoked)
			return -ENODEV;

		if (client->packet_head == client->tail &&
		    (file->f_flags & O_NONBLOCK))
			return -EAGAIN;

		/*
		 * count == 0 is special - no IO is done but we check
		 * for error conditions (see above).
		 */
		if (count == 0)
			break;

		while (read + input_event_size() <= count &&
		       evdev_fetch_next_event(client, &event)) {

			if (input_event_to_user(buffer + read, &event))
				return -EFAULT;

			read += input_event_size();
		}

		if (read)
			break;

		if (!(file->f_flags & O_NONBLOCK)) {
			error = wait_event_interruptible(evdev->wait,
					client->packet_head != client->tail ||
					!evdev->exist || client->revoked);
			if (error)
				return error;
		}
	}

	return read;
}

/* No kernel lock - fine */
static __poll_t evdev_poll(struct file *file, poll_table *wait)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	__poll_t mask;

	poll_wait(file, &evdev->wait, wait);

	if (evdev->exist && !client->revoked)
		mask = EPOLLOUT | EPOLLWRNORM;
	else
		mask = EPOLLHUP | EPOLLERR;

	if (client->packet_head != client->tail)
		mask |= EPOLLIN | EPOLLRDNORM;

	return mask;
}
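
/*
 * bits_to_user()/bits_from_user() copy kernel bitmaps to and from userspace.
 * For 32-bit (compat) callers on a big-endian 64-bit kernel, the two 32-bit
 * halves of each native long must be swapped so that bit 0 still lands in the
 * first compat long; the "i + 1 - ((i % 2) << 1)" index below maps
 * 0->1, 1->0, 2->3, 3->2, ...  On little-endian kernels the memory layout
 * already matches and a plain copy is enough.
 */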
#ifdef CONFIG_COMPAT

#define BITS_PER_LONG_COMPAT (sizeof(compat_long_t) * 8)
#define BITS_TO_LONGS_COMPAT(x) ((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)

#ifdef __BIG_ENDIAN
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len, i;

	if (compat) {
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_to_user((compat_long_t __user *) p + i,
					 (compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					 sizeof(compat_long_t)))
				return -EFAULT;
	} else {
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_to_user(p, bits, len))
			return -EFAULT;
	}

	return len;
}

static int bits_from_user(unsigned long *bits, unsigned int maxbit,
			  unsigned int maxlen, const void __user *p, int compat)
{
	int len, i;

	if (compat) {
		if (maxlen % sizeof(compat_long_t))
			return -EINVAL;

		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_from_user((compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					   (compat_long_t __user *) p + i,
					   sizeof(compat_long_t)))
				return -EFAULT;
		if (i % 2)
			*((compat_long_t *) bits + i - 1) = 0;

	} else {
		if (maxlen % sizeof(long))
			return -EINVAL;

		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_from_user(bits, p, len))
			return -EFAULT;
	}

	return len;
}

#else

static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len = compat ?
			BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t) :
			BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, bits, len) ? -EFAULT : len;
}

static int bits_from_user(unsigned long *bits, unsigned int maxbit,
			  unsigned int maxlen, const void __user *p, int compat)
{
	size_t chunk_size = compat ? sizeof(compat_long_t) : sizeof(long);
	int len;

	if (maxlen % chunk_size)
		return -EINVAL;

	len = compat ? BITS_TO_LONGS_COMPAT(maxbit) : BITS_TO_LONGS(maxbit);
	len *= chunk_size;
	if (len > maxlen)
		len = maxlen;

	return copy_from_user(bits, p, len) ? -EFAULT : len;
}

#endif /* __BIG_ENDIAN */

#else

static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len = BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, bits, len) ? -EFAULT : len;
}

static int bits_from_user(unsigned long *bits, unsigned int maxbit,
			  unsigned int maxlen, const void __user *p, int compat)
{
	int len;

	if (maxlen % sizeof(long))
		return -EINVAL;

	len = BITS_TO_LONGS(maxbit) * sizeof(long);
	if (len > maxlen)
		len = maxlen;

	return copy_from_user(bits, p, len) ? -EFAULT : len;
}

#endif /* CONFIG_COMPAT */

static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
{
	int len;

	if (!str)
		return -ENOENT;

	len = strlen(str) + 1;
	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, str, len) ? -EFAULT : len;
}

static int handle_eviocgbit(struct input_dev *dev,
			    unsigned int type, unsigned int size,
			    void __user *p, int compat_mode)
{
	unsigned long *bits;
	int len;

	switch (type) {

	case      0: bits = dev->evbit;  len = EV_MAX;  break;
	case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
	case EV_REL: bits = dev->relbit; len = REL_MAX; break;
	case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
	case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
	case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
	case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
	case EV_FF:  bits = dev->ffbit;  len = FF_MAX;  break;
	case EV_SW:  bits = dev->swbit;  len = SW_MAX;  break;
	default: return -EINVAL;
	}

	return bits_to_user(bits, len, size, p, compat_mode);
}

static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;
	int error;

	/* legacy case */
	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	error = input_get_keycode(dev, &ke);
	if (error)
		return error;

	if (put_user(ke.keycode, ip + 1))
		return -EFAULT;

	return 0;
}

static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke;
	int error;

	if (copy_from_user(&ke, p, sizeof(ke)))
		return -EFAULT;

	error = input_get_keycode(dev, &ke);
	if (error)
		return error;

	if (copy_to_user(p, &ke, sizeof(ke)))
		return -EFAULT;

	return 0;
}
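
/*
 * Illustrative userspace sketch (not part of this driver): querying a keymap
 * entry by index with EVIOCGKEYCODE_V2, served by
 * evdev_handle_get_keycode_v2() above.  Error handling is trimmed.
 *
 *	struct input_keymap_entry ke = {
 *		.flags = INPUT_KEYMAP_BY_INDEX,
 *		.index = 0,
 *	};
 *
 *	if (ioctl(fd, EVIOCGKEYCODE_V2, &ke) == 0)
 *		printf("scancode len %u -> keycode %u\n", ke.len, ke.keycode);
 */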
static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;

	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	if (get_user(ke.keycode, ip + 1))
		return -EFAULT;

	return input_set_keycode(dev, &ke);
}

static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke;

	if (copy_from_user(&ke, p, sizeof(ke)))
		return -EFAULT;

	if (ke.len > sizeof(ke.scancode))
		return -EINVAL;

	return input_set_keycode(dev, &ke);
}

/*
 * If we transfer state to the user, we should flush all pending events
 * of the same type from the client's queue. Otherwise, they might end up
 * with duplicate events, which can screw up the client's state tracking.
 * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
 * event so user-space will notice missing events.
 *
 * LOCKING:
 * We need to take event_lock before buffer_lock to avoid dead-locks. But we
 * need the event_lock only to guarantee consistent state. We can safely
 * release it while flushing the queue. This allows input-core to handle
 * filters while we flush the queue.
 */
static int evdev_handle_get_val(struct evdev_client *client,
				struct input_dev *dev, unsigned int type,
				unsigned long *bits, unsigned int maxbit,
				unsigned int maxlen, void __user *p,
				int compat)
{
	int ret;
	unsigned long *mem;

	mem = bitmap_alloc(maxbit, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	spin_lock_irq(&dev->event_lock);
	spin_lock(&client->buffer_lock);

	bitmap_copy(mem, bits, maxbit);

	spin_unlock(&dev->event_lock);

	__evdev_flush_queue(client, type);

	spin_unlock_irq(&client->buffer_lock);

	ret = bits_to_user(mem, maxbit, maxlen, p, compat);
	if (ret < 0)
		evdev_queue_syn_dropped(client);

	bitmap_free(mem);

	return ret;
}

static int evdev_handle_mt_request(struct input_dev *dev,
				   unsigned int size,
				   int __user *ip)
{
	const struct input_mt *mt = dev->mt;
	unsigned int code;
	int max_slots;
	int i;

	if (get_user(code, &ip[0]))
		return -EFAULT;
	if (!mt || !input_is_mt_value(code))
		return -EINVAL;

	max_slots = (size - sizeof(__u32)) / sizeof(__s32);
	for (i = 0; i < mt->num_slots && i < max_slots; i++) {
		int value = input_mt_get_value(&mt->slots[i], code);
		if (put_user(value, &ip[1 + i]))
			return -EFAULT;
	}

	return 0;
}

static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
			struct file *file)
{
	client->revoked = true;
	evdev_ungrab(evdev, client);
	input_flush_device(&evdev->handle, file);
	wake_up_interruptible(&evdev->wait);

	return 0;
}

/* must be called with evdev-mutex held */
static int evdev_set_mask(struct evdev_client *client,
			  unsigned int type,
			  const void __user *codes,
			  u32 codes_size,
			  int compat)
{
	unsigned long flags, *mask, *oldmask;
	size_t cnt;
	int error;

	/* we allow unknown types and 'codes_size > size' for forward-compat */
	cnt = evdev_get_mask_cnt(type);
	if (!cnt)
		return 0;

	mask = bitmap_zalloc(cnt, GFP_KERNEL);
	if (!mask)
		return -ENOMEM;

	error = bits_from_user(mask, cnt - 1, codes_size, codes, compat);
	if (error < 0) {
		bitmap_free(mask);
		return error;
	}

	spin_lock_irqsave(&client->buffer_lock, flags);
	oldmask = client->evmasks[type];
	client->evmasks[type] = mask;
	spin_unlock_irqrestore(&client->buffer_lock, flags);

	bitmap_free(oldmask);

	return 0;
}
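
/*
 * Illustrative userspace sketch (not part of this driver): restricting EV_KEY
 * delivery to KEY_A with the EVIOCSMASK ioctl handled by evdev_set_mask()
 * above.  Error handling is trimmed.
 *
 *	unsigned long codes[KEY_CNT / (8 * sizeof(long)) + 1] = { 0 };
 *	struct input_mask mask = {
 *		.type = EV_KEY,
 *		.codes_size = sizeof(codes),
 *		.codes_ptr = (__u64)(unsigned long)codes,
 *	};
 *
 *	codes[KEY_A / (8 * sizeof(long))] |= 1UL << (KEY_A % (8 * sizeof(long)));
 *	if (ioctl(fd, EVIOCSMASK, &mask))
 *		perror("EVIOCSMASK");
 */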
/* must be called with evdev-mutex held */
static int evdev_get_mask(struct evdev_client *client,
			  unsigned int type,
			  void __user *codes,
			  u32 codes_size,
			  int compat)
{
	unsigned long *mask;
	size_t cnt, size, xfer_size;
	int i;
	int error;

	/* we allow unknown types and 'codes_size > size' for forward-compat */
	cnt = evdev_get_mask_cnt(type);
	size = sizeof(unsigned long) * BITS_TO_LONGS(cnt);
	xfer_size = min_t(size_t, codes_size, size);

	if (cnt > 0) {
		mask = client->evmasks[type];
		if (mask) {
			error = bits_to_user(mask, cnt - 1,
					     xfer_size, codes, compat);
			if (error < 0)
				return error;
		} else {
			/* fake mask with all bits set */
			for (i = 0; i < xfer_size; i++)
				if (put_user(0xffU, (u8 __user *)codes + i))
					return -EFAULT;
		}
	}

	if (xfer_size < codes_size)
		if (clear_user(codes + xfer_size, codes_size - xfer_size))
			return -EFAULT;

	return 0;
}
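
/*
 * evdev_do_ioctl() below dispatches in three passes: fixed-size commands are
 * matched directly, then commands whose size is encoded in the ioctl number
 * are matched with the size bits masked off (EVIOC_MASK_SIZE), and finally
 * the multi-number EVIOCGBIT/EVIOCGABS/EVIOCSABS families are decoded from
 * the ioctl direction and number.
 */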
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
			   void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_dev *dev = evdev->handle.dev;
	struct input_absinfo abs;
	struct input_mask mask;
	struct ff_effect effect;
	int __user *ip = (int __user *)p;
	unsigned int i, t, u, v;
	unsigned int size;
	int error;

	/* First we check for fixed-length commands */
	switch (cmd) {

	case EVIOCGVERSION:
		return put_user(EV_VERSION, ip);

	case EVIOCGID:
		if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
			return -EFAULT;
		return 0;

	case EVIOCGREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (put_user(dev->rep[REP_DELAY], ip))
			return -EFAULT;
		if (put_user(dev->rep[REP_PERIOD], ip + 1))
			return -EFAULT;
		return 0;

	case EVIOCSREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (get_user(u, ip))
			return -EFAULT;
		if (get_user(v, ip + 1))
			return -EFAULT;

		input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
		input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);

		return 0;

	case EVIOCRMFF:
		return input_ff_erase(dev, (int)(unsigned long) p, file);

	case EVIOCGEFFECTS:
		i = test_bit(EV_FF, dev->evbit) ?
				dev->ff->max_effects : 0;
		if (put_user(i, ip))
			return -EFAULT;
		return 0;

	case EVIOCGRAB:
		if (p)
			return evdev_grab(evdev, client);
		else
			return evdev_ungrab(evdev, client);

	case EVIOCREVOKE:
		if (p)
			return -EINVAL;
		else
			return evdev_revoke(evdev, client, file);

	case EVIOCGMASK: {
		void __user *codes_ptr;

		if (copy_from_user(&mask, p, sizeof(mask)))
			return -EFAULT;

		codes_ptr = (void __user *)(unsigned long)mask.codes_ptr;
		return evdev_get_mask(client,
				      mask.type, codes_ptr, mask.codes_size,
				      compat_mode);
	}

	case EVIOCSMASK: {
		const void __user *codes_ptr;

		if (copy_from_user(&mask, p, sizeof(mask)))
			return -EFAULT;

		codes_ptr = (const void __user *)(unsigned long)mask.codes_ptr;
		return evdev_set_mask(client,
				      mask.type, codes_ptr, mask.codes_size,
				      compat_mode);
	}

	case EVIOCSCLOCKID:
		if (copy_from_user(&i, p, sizeof(unsigned int)))
			return -EFAULT;

		return evdev_set_clk_type(client, i);

	case EVIOCGKEYCODE:
		return evdev_handle_get_keycode(dev, p);

	case EVIOCSKEYCODE:
		return evdev_handle_set_keycode(dev, p);

	case EVIOCGKEYCODE_V2:
		return evdev_handle_get_keycode_v2(dev, p);

	case EVIOCSKEYCODE_V2:
		return evdev_handle_set_keycode_v2(dev, p);
	}

	size = _IOC_SIZE(cmd);

	/* Now check variable-length commands */
#define EVIOC_MASK_SIZE(nr)	((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
	switch (EVIOC_MASK_SIZE(cmd)) {

	case EVIOCGPROP(0):
		return bits_to_user(dev->propbit, INPUT_PROP_MAX,
				    size, p, compat_mode);

	case EVIOCGMTSLOTS(0):
		return evdev_handle_mt_request(dev, size, ip);

	case EVIOCGKEY(0):
		return evdev_handle_get_val(client, dev, EV_KEY, dev->key,
					    KEY_MAX, size, p, compat_mode);

	case EVIOCGLED(0):
		return evdev_handle_get_val(client, dev, EV_LED, dev->led,
					    LED_MAX, size, p, compat_mode);

	case EVIOCGSND(0):
		return evdev_handle_get_val(client, dev, EV_SND, dev->snd,
					    SND_MAX, size, p, compat_mode);

	case EVIOCGSW(0):
		return evdev_handle_get_val(client, dev, EV_SW, dev->sw,
					    SW_MAX, size, p, compat_mode);

	case EVIOCGNAME(0):
		return str_to_user(dev->name, size, p);

	case EVIOCGPHYS(0):
		return str_to_user(dev->phys, size, p);

	case EVIOCGUNIQ(0):
		return str_to_user(dev->uniq, size, p);

	case EVIOC_MASK_SIZE(EVIOCSFF):
		if (input_ff_effect_from_user(p, size, &effect))
			return -EFAULT;

		error = input_ff_upload(dev, &effect, file);
		if (error)
			return error;

		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
			return -EFAULT;

		return 0;
	}

	/* Multi-number variable-length handlers */
	if (_IOC_TYPE(cmd) != 'E')
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ) {

		if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
			return handle_eviocgbit(dev,
						_IOC_NR(cmd) & EV_MAX, size,
						p, compat_mode);

		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;
			abs = dev->absinfo[t];

			if (copy_to_user(p, &abs, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			return 0;
		}
	}

	if (_IOC_DIR(cmd) == _IOC_WRITE) {

		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;

			if (copy_from_user(&abs, p, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			if (size < sizeof(struct input_absinfo))
				abs.resolution = 0;

			/* We can't change number of reserved MT slots */
			if (t == ABS_MT_SLOT)
				return -EINVAL;

			/*
			 * Take event lock to ensure that we are not
			 * changing device parameters in the middle
			 * of an event.
			 */
			spin_lock_irq(&dev->event_lock);
			dev->absinfo[t] = abs;
			spin_unlock_irq(&dev->event_lock);

			return 0;
		}
	}

	return -EINVAL;
}

static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
				void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	retval = evdev_do_ioctl(file, cmd, p, compat_mode);

out:
	mutex_unlock(&evdev->mutex);
	return retval;
}

static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
}

#ifdef CONFIG_COMPAT
static long evdev_ioctl_compat(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
}
#endif

static const struct file_operations evdev_fops = {
	.owner		= THIS_MODULE,
	.read		= evdev_read,
	.write		= evdev_write,
	.poll		= evdev_poll,
	.open		= evdev_open,
	.release	= evdev_release,
	.unlocked_ioctl	= evdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= evdev_ioctl_compat,
#endif
	.fasync		= evdev_fasync,
	.flush		= evdev_flush,
	.llseek		= no_llseek,
};

/*
 * Mark device non-existent. This disables writes, ioctls and
 * prevents new users from opening the device. Already posted
 * blocking reads will remain, but new ones will fail.
 */
static void evdev_mark_dead(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);
	evdev->exist = false;
	mutex_unlock(&evdev->mutex);
}

static void evdev_cleanup(struct evdev *evdev)
{
	struct input_handle *handle = &evdev->handle;

	evdev_mark_dead(evdev);
	evdev_hangup(evdev);

	/* evdev is marked dead so no one else accesses evdev->open */
	if (evdev->open) {
		input_flush_device(handle, NULL);
		input_close_device(handle);
	}
}
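
/*
 * evdev_connect() below names the device node after the reserved minor:
 * legacy minors (EVDEV_MINOR_BASE..EVDEV_MINOR_BASE + EVDEV_MINORS - 1) are
 * normalized to event0..event31, while dynamically allocated minors keep
 * their value as the "eventN" suffix.
 */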
/*
 * Create new evdev device. Note that input core serializes calls
 * to connect and disconnect.
 */
static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
			 const struct input_device_id *id)
{
	struct evdev *evdev;
	int minor;
	int dev_no;
	int error;

	minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
	if (minor < 0) {
		error = minor;
		pr_err("failed to reserve new minor: %d\n", error);
		return error;
	}

	evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
	if (!evdev) {
		error = -ENOMEM;
		goto err_free_minor;
	}

	INIT_LIST_HEAD(&evdev->client_list);
	spin_lock_init(&evdev->client_lock);
	mutex_init(&evdev->mutex);
	init_waitqueue_head(&evdev->wait);
	evdev->exist = true;

	dev_no = minor;
	/* Normalize device number if it falls into legacy range */
	if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
		dev_no -= EVDEV_MINOR_BASE;
	dev_set_name(&evdev->dev, "event%d", dev_no);

	evdev->handle.dev = input_get_device(dev);
	evdev->handle.name = dev_name(&evdev->dev);
	evdev->handle.handler = handler;
	evdev->handle.private = evdev;

	evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
	evdev->dev.class = &input_class;
	evdev->dev.parent = &dev->dev;
	evdev->dev.release = evdev_free;
	device_initialize(&evdev->dev);

	error = input_register_handle(&evdev->handle);
	if (error)
		goto err_free_evdev;

	cdev_init(&evdev->cdev, &evdev_fops);

	error = cdev_device_add(&evdev->cdev, &evdev->dev);
	if (error)
		goto err_cleanup_evdev;

	return 0;

err_cleanup_evdev:
	evdev_cleanup(evdev);
	input_unregister_handle(&evdev->handle);
err_free_evdev:
	put_device(&evdev->dev);
err_free_minor:
	input_free_minor(minor);
	return error;
}

static void evdev_disconnect(struct input_handle *handle)
{
	struct evdev *evdev = handle->private;

	cdev_device_del(&evdev->cdev, &evdev->dev);
	evdev_cleanup(evdev);
	input_free_minor(MINOR(evdev->dev.devt));
	input_unregister_handle(handle);
	put_device(&evdev->dev);
}

static const struct input_device_id evdev_ids[] = {
	{ .driver_info = 1 },	/* Matches all devices */
	{ },			/* Terminating zero entry */
};

MODULE_DEVICE_TABLE(input, evdev_ids);

static struct input_handler evdev_handler = {
	.event		= evdev_event,
	.events		= evdev_events,
	.connect	= evdev_connect,
	.disconnect	= evdev_disconnect,
	.legacy_minors	= true,
	.minor		= EVDEV_MINOR_BASE,
	.name		= "evdev",
	.id_table	= evdev_ids,
};

static int __init evdev_init(void)
{
	return input_register_handler(&evdev_handler);
}

static void __exit evdev_exit(void)
{
	input_unregister_handler(&evdev_handler);
}

module_init(evdev_init);
module_exit(evdev_exit);

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Input driver event char devices");
MODULE_LICENSE("GPL");