/*
 * Event char devices, giving access to raw input device events.
 *
 * Copyright (c) 1999-2002 Vojtech Pavlik
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#define EVDEV_MINOR_BASE	64
#define EVDEV_MINORS		32
#define EVDEV_MIN_BUFFER_SIZE	64U
#define EVDEV_BUF_PACKETS	8

#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/major.h>
#include <linux/device.h>
#include <linux/cdev.h>
#include "input-compat.h"

enum evdev_clock_type {
	EV_CLK_REAL = 0,
	EV_CLK_MONO,
	EV_CLK_BOOT,
	EV_CLK_MAX
};

struct evdev {
	int open;
	struct input_handle handle;
	wait_queue_head_t wait;
	struct evdev_client __rcu *grab;
	struct list_head client_list;
	spinlock_t client_lock; /* protects client_list */
	struct mutex mutex;
	struct device dev;
	struct cdev cdev;
	bool exist;
};

struct evdev_client {
	unsigned int head;
	unsigned int tail;
	unsigned int packet_head; /* [future] position of the first element of next packet */
	spinlock_t buffer_lock; /* protects access to buffer, head and tail */
	struct fasync_struct *fasync;
	struct evdev *evdev;
	struct list_head node;
	int clk_type;
	bool revoked;
	unsigned int bufsize;
	struct input_event buffer[];
};

/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
	unsigned int i, head, num;
	unsigned int mask = client->bufsize - 1;
	bool is_report;
	struct input_event *ev;

	BUG_ON(type == EV_SYN);

	head = client->tail;
	client->packet_head = client->tail;

	/* init to 1 so a leading SYN_REPORT will not be dropped */
	num = 1;

	for (i = client->tail; i != client->head; i = (i + 1) & mask) {
		ev = &client->buffer[i];
		is_report = ev->type == EV_SYN && ev->code == SYN_REPORT;

		if (ev->type == type) {
			/* drop matched entry */
			continue;
		} else if (is_report && !num) {
			/* drop empty SYN_REPORT groups */
			continue;
		} else if (head != i) {
			/* move entry to fill the gap */
			client->buffer[head].time = ev->time;
			client->buffer[head].type = ev->type;
			client->buffer[head].code = ev->code;
			client->buffer[head].value = ev->value;
		}

		num++;
		head = (head + 1) & mask;

		if (is_report) {
			num = 0;
			client->packet_head = head;
		}
	}

	client->head = head;
}
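
/*
 * Queue an EV_SYN/SYN_DROPPED event to tell the client that events were
 * lost. Caller must hold client->buffer_lock; evdev_queue_syn_dropped()
 * below is the wrapper that takes the lock itself.
 */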
static void __evdev_queue_syn_dropped(struct evdev_client *client)
{
	struct input_event ev;
	ktime_t time;

	time = client->clk_type == EV_CLK_REAL ?
			ktime_get_real() :
			client->clk_type == EV_CLK_MONO ?
				ktime_get() :
				ktime_get_boottime();

	ev.time = ktime_to_timeval(time);
	ev.type = EV_SYN;
	ev.code = SYN_DROPPED;
	ev.value = 0;

	client->buffer[client->head++] = ev;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/* drop queue but keep our SYN_DROPPED event */
		client->tail = (client->head - 1) & (client->bufsize - 1);
		client->packet_head = client->tail;
	}
}

static void evdev_queue_syn_dropped(struct evdev_client *client)
{
	unsigned long flags;

	spin_lock_irqsave(&client->buffer_lock, flags);
	__evdev_queue_syn_dropped(client);
	spin_unlock_irqrestore(&client->buffer_lock, flags);
}

static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
{
	unsigned long flags;

	if (client->clk_type == clkid)
		return 0;

	switch (clkid) {

	case CLOCK_REALTIME:
		client->clk_type = EV_CLK_REAL;
		break;
	case CLOCK_MONOTONIC:
		client->clk_type = EV_CLK_MONO;
		break;
	case CLOCK_BOOTTIME:
		client->clk_type = EV_CLK_BOOT;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Flush pending events and queue SYN_DROPPED event,
	 * but only if the queue is not empty.
	 */
	spin_lock_irqsave(&client->buffer_lock, flags);

	if (client->head != client->tail) {
		client->packet_head = client->head = client->tail;
		__evdev_queue_syn_dropped(client);
	}

	spin_unlock_irqrestore(&client->buffer_lock, flags);

	return 0;
}

static void __pass_event(struct evdev_client *client,
			 const struct input_event *event)
{
	client->buffer[client->head++] = *event;
	client->head &= client->bufsize - 1;

	if (unlikely(client->head == client->tail)) {
		/*
		 * This effectively "drops" all unconsumed events, leaving
		 * EV_SYN/SYN_DROPPED plus the newest event in the queue.
		 */
		client->tail = (client->head - 2) & (client->bufsize - 1);

		client->buffer[client->tail].time = event->time;
		client->buffer[client->tail].type = EV_SYN;
		client->buffer[client->tail].code = SYN_DROPPED;
		client->buffer[client->tail].value = 0;

		client->packet_head = client->tail;
	}

	if (event->type == EV_SYN && event->code == SYN_REPORT) {
		client->packet_head = client->head;
		kill_fasync(&client->fasync, SIGIO, POLL_IN);
	}
}
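
/*
 * Deliver a batch of input values to a single client, stamping them with
 * the timestamp matching the clock the client selected via EVIOCSCLOCKID.
 * Revoked clients are skipped entirely.
 */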
static void evdev_pass_values(struct evdev_client *client,
			const struct input_value *vals, unsigned int count,
			ktime_t *ev_time)
{
	struct evdev *evdev = client->evdev;
	const struct input_value *v;
	struct input_event event;
	bool wakeup = false;

	if (client->revoked)
		return;

	event.time = ktime_to_timeval(ev_time[client->clk_type]);

	/* Interrupts are disabled, just acquire the lock. */
	spin_lock(&client->buffer_lock);

	for (v = vals; v != vals + count; v++) {
		event.type = v->type;
		event.code = v->code;
		event.value = v->value;
		__pass_event(client, &event);
		if (v->type == EV_SYN && v->code == SYN_REPORT)
			wakeup = true;
	}

	spin_unlock(&client->buffer_lock);

	if (wakeup)
		wake_up_interruptible(&evdev->wait);
}

/*
 * Pass incoming events to all connected clients.
 */
static void evdev_events(struct input_handle *handle,
			 const struct input_value *vals, unsigned int count)
{
	struct evdev *evdev = handle->private;
	struct evdev_client *client;
	ktime_t ev_time[EV_CLK_MAX];

	ev_time[EV_CLK_MONO] = ktime_get();
	ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
	ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
						 TK_OFFS_BOOT);

	rcu_read_lock();

	client = rcu_dereference(evdev->grab);

	if (client)
		evdev_pass_values(client, vals, count, ev_time);
	else
		list_for_each_entry_rcu(client, &evdev->client_list, node)
			evdev_pass_values(client, vals, count, ev_time);

	rcu_read_unlock();
}

/*
 * Pass incoming event to all connected clients.
 */
static void evdev_event(struct input_handle *handle,
			unsigned int type, unsigned int code, int value)
{
	struct input_value vals[] = { { type, code, value } };

	evdev_events(handle, vals, 1);
}

static int evdev_fasync(int fd, struct file *file, int on)
{
	struct evdev_client *client = file->private_data;

	return fasync_helper(fd, file, on, &client->fasync);
}

static int evdev_flush(struct file *file, fl_owner_t id)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;

	mutex_lock(&evdev->mutex);

	if (evdev->exist && !client->revoked)
		input_flush_device(&evdev->handle, file);

	mutex_unlock(&evdev->mutex);
	return 0;
}
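
/*
 * Release callback of the embedded struct device; runs once the last
 * reference to evdev->dev is dropped and frees the evdev structure.
 */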
static void evdev_free(struct device *dev)
{
	struct evdev *evdev = container_of(dev, struct evdev, dev);

	input_put_device(evdev->handle.dev);
	kfree(evdev);
}

/*
 * Grabs an event device (along with underlying input device).
 * This function is called with evdev->mutex taken.
 */
static int evdev_grab(struct evdev *evdev, struct evdev_client *client)
{
	int error;

	if (evdev->grab)
		return -EBUSY;

	error = input_grab_device(&evdev->handle);
	if (error)
		return error;

	rcu_assign_pointer(evdev->grab, client);

	return 0;
}

static int evdev_ungrab(struct evdev *evdev, struct evdev_client *client)
{
	struct evdev_client *grab = rcu_dereference_protected(evdev->grab,
					lockdep_is_held(&evdev->mutex));

	if (grab != client)
		return -EINVAL;

	rcu_assign_pointer(evdev->grab, NULL);
	synchronize_rcu();
	input_release_device(&evdev->handle);

	return 0;
}

static void evdev_attach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_add_tail_rcu(&client->node, &evdev->client_list);
	spin_unlock(&evdev->client_lock);
}

static void evdev_detach_client(struct evdev *evdev,
				struct evdev_client *client)
{
	spin_lock(&evdev->client_lock);
	list_del_rcu(&client->node);
	spin_unlock(&evdev->client_lock);
	synchronize_rcu();
}
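
/*
 * Open the underlying input device on the first open and keep a use
 * count so that it is closed again only when the last client goes away.
 */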
static int evdev_open_device(struct evdev *evdev)
{
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist)
		retval = -ENODEV;
	else if (!evdev->open++) {
		retval = input_open_device(&evdev->handle);
		if (retval)
			evdev->open--;
	}

	mutex_unlock(&evdev->mutex);
	return retval;
}

static void evdev_close_device(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);

	if (evdev->exist && !--evdev->open)
		input_close_device(&evdev->handle);

	mutex_unlock(&evdev->mutex);
}

/*
 * Wake up users waiting for IO so they can disconnect from
 * dead device.
 */
static void evdev_hangup(struct evdev *evdev)
{
	struct evdev_client *client;

	spin_lock(&evdev->client_lock);
	list_for_each_entry(client, &evdev->client_list, node)
		kill_fasync(&client->fasync, SIGIO, POLL_HUP);
	spin_unlock(&evdev->client_lock);

	wake_up_interruptible(&evdev->wait);
}

static int evdev_release(struct inode *inode, struct file *file)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;

	mutex_lock(&evdev->mutex);
	evdev_ungrab(evdev, client);
	mutex_unlock(&evdev->mutex);

	evdev_detach_client(evdev, client);

	kvfree(client);

	evdev_close_device(evdev);

	return 0;
}

static unsigned int evdev_compute_buffer_size(struct input_dev *dev)
{
	unsigned int n_events =
		max(dev->hint_events_per_packet * EVDEV_BUF_PACKETS,
		    EVDEV_MIN_BUFFER_SIZE);

	return roundup_pow_of_two(n_events);
}

static int evdev_open(struct inode *inode, struct file *file)
{
	struct evdev *evdev = container_of(inode->i_cdev, struct evdev, cdev);
	unsigned int bufsize = evdev_compute_buffer_size(evdev->handle.dev);
	unsigned int size = sizeof(struct evdev_client) +
					bufsize * sizeof(struct input_event);
	struct evdev_client *client;
	int error;

	client = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (!client)
		client = vzalloc(size);
	if (!client)
		return -ENOMEM;

	client->bufsize = bufsize;
	spin_lock_init(&client->buffer_lock);
	client->evdev = evdev;
	evdev_attach_client(evdev, client);

	error = evdev_open_device(evdev);
	if (error)
		goto err_free_client;

	file->private_data = client;
	nonseekable_open(inode, file);

	return 0;

 err_free_client:
	evdev_detach_client(evdev, client);
	kvfree(client);
	return error;
}

static ssize_t evdev_write(struct file *file, const char __user *buffer,
			   size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	int retval = 0;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	while (retval + input_event_size() <= count) {

		if (input_event_from_user(buffer + retval, &event)) {
			retval = -EFAULT;
			goto out;
		}
		retval += input_event_size();

		input_inject_event(&evdev->handle,
				   event.type, event.code, event.value);
	}

 out:
	mutex_unlock(&evdev->mutex);
	return retval;
}
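
/*
 * Pop the next completed event from the client's ring buffer. Only
 * events up to packet_head (the end of the last complete SYN_REPORT
 * packet) are handed out, so readers never see partial packets.
 */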
static int evdev_fetch_next_event(struct evdev_client *client,
				  struct input_event *event)
{
	int have_event;

	spin_lock_irq(&client->buffer_lock);

	have_event = client->packet_head != client->tail;
	if (have_event) {
		*event = client->buffer[client->tail++];
		client->tail &= client->bufsize - 1;
	}

	spin_unlock_irq(&client->buffer_lock);

	return have_event;
}

static ssize_t evdev_read(struct file *file, char __user *buffer,
			  size_t count, loff_t *ppos)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_event event;
	size_t read = 0;
	int error;

	if (count != 0 && count < input_event_size())
		return -EINVAL;

	for (;;) {
		if (!evdev->exist || client->revoked)
			return -ENODEV;

		if (client->packet_head == client->tail &&
		    (file->f_flags & O_NONBLOCK))
			return -EAGAIN;

		/*
		 * count == 0 is special - no IO is done but we check
		 * for error conditions (see above).
		 */
		if (count == 0)
			break;

		while (read + input_event_size() <= count &&
		       evdev_fetch_next_event(client, &event)) {

			if (input_event_to_user(buffer + read, &event))
				return -EFAULT;

			read += input_event_size();
		}

		if (read)
			break;

		if (!(file->f_flags & O_NONBLOCK)) {
			error = wait_event_interruptible(evdev->wait,
					client->packet_head != client->tail ||
					!evdev->exist || client->revoked);
			if (error)
				return error;
		}
	}

	return read;
}

/* No kernel lock - fine */
static unsigned int evdev_poll(struct file *file, poll_table *wait)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	unsigned int mask;

	poll_wait(file, &evdev->wait, wait);

	if (evdev->exist && !client->revoked)
		mask = POLLOUT | POLLWRNORM;
	else
		mask = POLLHUP | POLLERR;

	if (client->packet_head != client->tail)
		mask |= POLLIN | POLLRDNORM;

	return mask;
}

#ifdef CONFIG_COMPAT

#define BITS_PER_LONG_COMPAT	(sizeof(compat_long_t) * 8)
#define BITS_TO_LONGS_COMPAT(x)	((((x) - 1) / BITS_PER_LONG_COMPAT) + 1)

#ifdef __BIG_ENDIAN
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len, i;

	if (compat) {
		len = BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t);
		if (len > maxlen)
			len = maxlen;

		for (i = 0; i < len / sizeof(compat_long_t); i++)
			if (copy_to_user((compat_long_t __user *) p + i,
					 (compat_long_t *) bits +
						i + 1 - ((i % 2) << 1),
					 sizeof(compat_long_t)))
				return -EFAULT;
	} else {
		len = BITS_TO_LONGS(maxbit) * sizeof(long);
		if (len > maxlen)
			len = maxlen;

		if (copy_to_user(p, bits, len))
			return -EFAULT;
	}

	return len;
}
#else
static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len = compat ?
			BITS_TO_LONGS_COMPAT(maxbit) * sizeof(compat_long_t) :
			BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, bits, len) ? -EFAULT : len;
}
#endif /* __BIG_ENDIAN */

#else

static int bits_to_user(unsigned long *bits, unsigned int maxbit,
			unsigned int maxlen, void __user *p, int compat)
{
	int len = BITS_TO_LONGS(maxbit) * sizeof(long);

	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, bits, len) ? -EFAULT : len;
}

#endif /* CONFIG_COMPAT */
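
/*
 * Copy a NUL-terminated string to userspace, truncated to the size of
 * the caller's buffer. Returns the number of bytes copied, -ENOENT if
 * the device does not provide the string, or -EFAULT on copy failure.
 */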
static int str_to_user(const char *str, unsigned int maxlen, void __user *p)
{
	int len;

	if (!str)
		return -ENOENT;

	len = strlen(str) + 1;
	if (len > maxlen)
		len = maxlen;

	return copy_to_user(p, str, len) ? -EFAULT : len;
}

static int handle_eviocgbit(struct input_dev *dev,
			    unsigned int type, unsigned int size,
			    void __user *p, int compat_mode)
{
	unsigned long *bits;
	int len;

	switch (type) {

	case      0: bits = dev->evbit;  len = EV_MAX;  break;
	case EV_KEY: bits = dev->keybit; len = KEY_MAX; break;
	case EV_REL: bits = dev->relbit; len = REL_MAX; break;
	case EV_ABS: bits = dev->absbit; len = ABS_MAX; break;
	case EV_MSC: bits = dev->mscbit; len = MSC_MAX; break;
	case EV_LED: bits = dev->ledbit; len = LED_MAX; break;
	case EV_SND: bits = dev->sndbit; len = SND_MAX; break;
	case EV_FF:  bits = dev->ffbit;  len = FF_MAX;  break;
	case EV_SW:  bits = dev->swbit;  len = SW_MAX;  break;
	default: return -EINVAL;
	}

	return bits_to_user(bits, len, size, p, compat_mode);
}
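
/*
 * Legacy EVIOCGKEYCODE handler: userspace passes an array of two ints,
 * scancode in [0] and keycode in [1]. The scancode is wrapped in a
 * struct input_keymap_entry before the keymap is queried.
 */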
static int evdev_handle_get_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;
	int error;

	/* legacy case */
	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	error = input_get_keycode(dev, &ke);
	if (error)
		return error;

	if (put_user(ke.keycode, ip + 1))
		return -EFAULT;

	return 0;
}

static int evdev_handle_get_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke;
	int error;

	if (copy_from_user(&ke, p, sizeof(ke)))
		return -EFAULT;

	error = input_get_keycode(dev, &ke);
	if (error)
		return error;

	if (copy_to_user(p, &ke, sizeof(ke)))
		return -EFAULT;

	return 0;
}

static int evdev_handle_set_keycode(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke = {
		.len	= sizeof(unsigned int),
		.flags	= 0,
	};
	int __user *ip = (int __user *)p;

	if (copy_from_user(ke.scancode, p, sizeof(unsigned int)))
		return -EFAULT;

	if (get_user(ke.keycode, ip + 1))
		return -EFAULT;

	return input_set_keycode(dev, &ke);
}

static int evdev_handle_set_keycode_v2(struct input_dev *dev, void __user *p)
{
	struct input_keymap_entry ke;

	if (copy_from_user(&ke, p, sizeof(ke)))
		return -EFAULT;

	if (ke.len > sizeof(ke.scancode))
		return -EINVAL;

	return input_set_keycode(dev, &ke);
}

/*
 * If we transfer state to the user, we should flush all pending events
 * of the same type from the client's queue. Otherwise, they might end up
 * with duplicate events, which can screw up the client's state tracking.
 * If bits_to_user fails after flushing the queue, we queue a SYN_DROPPED
 * event so user-space will notice missing events.
 *
 * LOCKING:
 * We need to take event_lock before buffer_lock to avoid deadlocks. But we
 * need the event_lock only to guarantee consistent state. We can safely
 * release it while flushing the queue. This allows input-core to handle
 * filters while we flush the queue.
 */
static int evdev_handle_get_val(struct evdev_client *client,
				struct input_dev *dev, unsigned int type,
				unsigned long *bits, unsigned int maxbit,
				unsigned int maxlen, void __user *p,
				int compat)
{
	int ret;
	unsigned long *mem;
	size_t len;

	len = BITS_TO_LONGS(maxbit) * sizeof(unsigned long);
	mem = kmalloc(len, GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	spin_lock_irq(&dev->event_lock);
	spin_lock(&client->buffer_lock);

	memcpy(mem, bits, len);

	spin_unlock(&dev->event_lock);

	__evdev_flush_queue(client, type);

	spin_unlock_irq(&client->buffer_lock);

	ret = bits_to_user(mem, maxbit, maxlen, p, compat);
	if (ret < 0)
		evdev_queue_syn_dropped(client);

	kfree(mem);

	return ret;
}
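
/*
 * EVIOCGMTSLOTS(len): judging by the accesses below, the user buffer
 * starts with the requested ABS_MT_* code and is filled with one __s32
 * value per slot on return, i.e. roughly
 *
 *	struct { __u32 code; __s32 values[num_slots]; };
 *
 * Slots that do not fit into the supplied buffer are silently omitted.
 */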
static int evdev_handle_mt_request(struct input_dev *dev,
				   unsigned int size,
				   int __user *ip)
{
	const struct input_mt *mt = dev->mt;
	unsigned int code;
	int max_slots;
	int i;

	if (get_user(code, &ip[0]))
		return -EFAULT;
	if (!mt || !input_is_mt_value(code))
		return -EINVAL;

	max_slots = (size - sizeof(__u32)) / sizeof(__s32);
	for (i = 0; i < mt->num_slots && i < max_slots; i++) {
		int value = input_mt_get_value(&mt->slots[i], code);
		if (put_user(value, &ip[1 + i]))
			return -EFAULT;
	}

	return 0;
}

static int evdev_revoke(struct evdev *evdev, struct evdev_client *client,
			struct file *file)
{
	client->revoked = true;
	evdev_ungrab(evdev, client);
	input_flush_device(&evdev->handle, file);
	wake_up_interruptible(&evdev->wait);

	return 0;
}
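
/*
 * Main ioctl dispatcher, called with evdev->mutex held by
 * evdev_ioctl_handler(). Fixed-size commands are matched first, then
 * variable-length commands with the size bits masked off, and finally
 * the multi-number EVIOCGBIT/EVIOCGABS/EVIOCSABS ranges.
 */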
static long evdev_do_ioctl(struct file *file, unsigned int cmd,
			   void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	struct input_dev *dev = evdev->handle.dev;
	struct input_absinfo abs;
	struct ff_effect effect;
	int __user *ip = (int __user *)p;
	unsigned int i, t, u, v;
	unsigned int size;
	int error;

	/* First we check for fixed-length commands */
	switch (cmd) {

	case EVIOCGVERSION:
		return put_user(EV_VERSION, ip);

	case EVIOCGID:
		if (copy_to_user(p, &dev->id, sizeof(struct input_id)))
			return -EFAULT;
		return 0;

	case EVIOCGREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (put_user(dev->rep[REP_DELAY], ip))
			return -EFAULT;
		if (put_user(dev->rep[REP_PERIOD], ip + 1))
			return -EFAULT;
		return 0;

	case EVIOCSREP:
		if (!test_bit(EV_REP, dev->evbit))
			return -ENOSYS;
		if (get_user(u, ip))
			return -EFAULT;
		if (get_user(v, ip + 1))
			return -EFAULT;

		input_inject_event(&evdev->handle, EV_REP, REP_DELAY, u);
		input_inject_event(&evdev->handle, EV_REP, REP_PERIOD, v);

		return 0;

	case EVIOCRMFF:
		return input_ff_erase(dev, (int)(unsigned long) p, file);

	case EVIOCGEFFECTS:
		i = test_bit(EV_FF, dev->evbit) ?
				dev->ff->max_effects : 0;
		if (put_user(i, ip))
			return -EFAULT;
		return 0;

	case EVIOCGRAB:
		if (p)
			return evdev_grab(evdev, client);
		else
			return evdev_ungrab(evdev, client);

	case EVIOCREVOKE:
		if (p)
			return -EINVAL;
		else
			return evdev_revoke(evdev, client, file);

	case EVIOCSCLOCKID:
		if (copy_from_user(&i, p, sizeof(unsigned int)))
			return -EFAULT;

		return evdev_set_clk_type(client, i);

	case EVIOCGKEYCODE:
		return evdev_handle_get_keycode(dev, p);

	case EVIOCSKEYCODE:
		return evdev_handle_set_keycode(dev, p);

	case EVIOCGKEYCODE_V2:
		return evdev_handle_get_keycode_v2(dev, p);

	case EVIOCSKEYCODE_V2:
		return evdev_handle_set_keycode_v2(dev, p);
	}

	size = _IOC_SIZE(cmd);

	/* Now check variable-length commands */
#define EVIOC_MASK_SIZE(nr)	((nr) & ~(_IOC_SIZEMASK << _IOC_SIZESHIFT))
	switch (EVIOC_MASK_SIZE(cmd)) {

	case EVIOCGPROP(0):
		return bits_to_user(dev->propbit, INPUT_PROP_MAX,
				    size, p, compat_mode);

	case EVIOCGMTSLOTS(0):
		return evdev_handle_mt_request(dev, size, ip);

	case EVIOCGKEY(0):
		return evdev_handle_get_val(client, dev, EV_KEY, dev->key,
					    KEY_MAX, size, p, compat_mode);

	case EVIOCGLED(0):
		return evdev_handle_get_val(client, dev, EV_LED, dev->led,
					    LED_MAX, size, p, compat_mode);

	case EVIOCGSND(0):
		return evdev_handle_get_val(client, dev, EV_SND, dev->snd,
					    SND_MAX, size, p, compat_mode);

	case EVIOCGSW(0):
		return evdev_handle_get_val(client, dev, EV_SW, dev->sw,
					    SW_MAX, size, p, compat_mode);

	case EVIOCGNAME(0):
		return str_to_user(dev->name, size, p);

	case EVIOCGPHYS(0):
		return str_to_user(dev->phys, size, p);

	case EVIOCGUNIQ(0):
		return str_to_user(dev->uniq, size, p);

	case EVIOC_MASK_SIZE(EVIOCSFF):
		if (input_ff_effect_from_user(p, size, &effect))
			return -EFAULT;

		error = input_ff_upload(dev, &effect, file);
		if (error)
			return error;

		if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
			return -EFAULT;

		return 0;
	}

	/* Multi-number variable-length handlers */
	if (_IOC_TYPE(cmd) != 'E')
		return -EINVAL;

	if (_IOC_DIR(cmd) == _IOC_READ) {

		if ((_IOC_NR(cmd) & ~EV_MAX) == _IOC_NR(EVIOCGBIT(0, 0)))
			return handle_eviocgbit(dev,
						_IOC_NR(cmd) & EV_MAX, size,
						p, compat_mode);

		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCGABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;
			abs = dev->absinfo[t];

			if (copy_to_user(p, &abs, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			return 0;
		}
	}

	if (_IOC_DIR(cmd) == _IOC_WRITE) {

		if ((_IOC_NR(cmd) & ~ABS_MAX) == _IOC_NR(EVIOCSABS(0))) {

			if (!dev->absinfo)
				return -EINVAL;

			t = _IOC_NR(cmd) & ABS_MAX;

			if (copy_from_user(&abs, p, min_t(size_t,
					size, sizeof(struct input_absinfo))))
				return -EFAULT;

			if (size < sizeof(struct input_absinfo))
				abs.resolution = 0;

			/* We can't change number of reserved MT slots */
			if (t == ABS_MT_SLOT)
				return -EINVAL;

			/*
			 * Take event lock to ensure that we are not
			 * changing device parameters in the middle
			 * of event.
			 */
			spin_lock_irq(&dev->event_lock);
			dev->absinfo[t] = abs;
			spin_unlock_irq(&dev->event_lock);

			return 0;
		}
	}

	return -EINVAL;
}
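
/*
 * Common entry point for both the native and compat ioctl paths: takes
 * evdev->mutex, rejects dead or revoked clients and hands the command
 * to evdev_do_ioctl().
 */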
static long evdev_ioctl_handler(struct file *file, unsigned int cmd,
				void __user *p, int compat_mode)
{
	struct evdev_client *client = file->private_data;
	struct evdev *evdev = client->evdev;
	int retval;

	retval = mutex_lock_interruptible(&evdev->mutex);
	if (retval)
		return retval;

	if (!evdev->exist || client->revoked) {
		retval = -ENODEV;
		goto out;
	}

	retval = evdev_do_ioctl(file, cmd, p, compat_mode);

 out:
	mutex_unlock(&evdev->mutex);
	return retval;
}

static long evdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, (void __user *)arg, 0);
}

#ifdef CONFIG_COMPAT
static long evdev_ioctl_compat(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	return evdev_ioctl_handler(file, cmd, compat_ptr(arg), 1);
}
#endif

static const struct file_operations evdev_fops = {
	.owner		= THIS_MODULE,
	.read		= evdev_read,
	.write		= evdev_write,
	.poll		= evdev_poll,
	.open		= evdev_open,
	.release	= evdev_release,
	.unlocked_ioctl	= evdev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= evdev_ioctl_compat,
#endif
	.fasync		= evdev_fasync,
	.flush		= evdev_flush,
	.llseek		= no_llseek,
};

/*
 * Mark device non-existent. This disables writes, ioctls and
 * prevents new users from opening the device. Blocking reads that are
 * already posted will stay, but new ones will fail.
 */
static void evdev_mark_dead(struct evdev *evdev)
{
	mutex_lock(&evdev->mutex);
	evdev->exist = false;
	mutex_unlock(&evdev->mutex);
}

static void evdev_cleanup(struct evdev *evdev)
{
	struct input_handle *handle = &evdev->handle;

	evdev_mark_dead(evdev);
	evdev_hangup(evdev);

	cdev_del(&evdev->cdev);

	/* evdev is marked dead so no one else accesses evdev->open */
	if (evdev->open) {
		input_flush_device(handle, NULL);
		input_close_device(handle);
	}
}

/*
 * Create new evdev device. Note that input core serializes calls
 * to connect and disconnect.
 */
static int evdev_connect(struct input_handler *handler, struct input_dev *dev,
			 const struct input_device_id *id)
{
	struct evdev *evdev;
	int minor;
	int dev_no;
	int error;

	minor = input_get_new_minor(EVDEV_MINOR_BASE, EVDEV_MINORS, true);
	if (minor < 0) {
		error = minor;
		pr_err("failed to reserve new minor: %d\n", error);
		return error;
	}

	evdev = kzalloc(sizeof(struct evdev), GFP_KERNEL);
	if (!evdev) {
		error = -ENOMEM;
		goto err_free_minor;
	}

	INIT_LIST_HEAD(&evdev->client_list);
	spin_lock_init(&evdev->client_lock);
	mutex_init(&evdev->mutex);
	init_waitqueue_head(&evdev->wait);
	evdev->exist = true;

	dev_no = minor;
	/* Normalize device number if it falls into legacy range */
	if (dev_no < EVDEV_MINOR_BASE + EVDEV_MINORS)
		dev_no -= EVDEV_MINOR_BASE;
	dev_set_name(&evdev->dev, "event%d", dev_no);

	evdev->handle.dev = input_get_device(dev);
	evdev->handle.name = dev_name(&evdev->dev);
	evdev->handle.handler = handler;
	evdev->handle.private = evdev;

	evdev->dev.devt = MKDEV(INPUT_MAJOR, minor);
	evdev->dev.class = &input_class;
	evdev->dev.parent = &dev->dev;
	evdev->dev.release = evdev_free;
	device_initialize(&evdev->dev);

	error = input_register_handle(&evdev->handle);
	if (error)
		goto err_free_evdev;

	cdev_init(&evdev->cdev, &evdev_fops);
	evdev->cdev.kobj.parent = &evdev->dev.kobj;
	error = cdev_add(&evdev->cdev, evdev->dev.devt, 1);
	if (error)
		goto err_unregister_handle;

	error = device_add(&evdev->dev);
	if (error)
		goto err_cleanup_evdev;

	return 0;

 err_cleanup_evdev:
	evdev_cleanup(evdev);
 err_unregister_handle:
	input_unregister_handle(&evdev->handle);
 err_free_evdev:
	put_device(&evdev->dev);
 err_free_minor:
	input_free_minor(minor);
	return error;
}

static void evdev_disconnect(struct input_handle *handle)
{
	struct evdev *evdev = handle->private;

	device_del(&evdev->dev);
	evdev_cleanup(evdev);
	input_free_minor(MINOR(evdev->dev.devt));
	input_unregister_handle(handle);
	put_device(&evdev->dev);
}

static const struct input_device_id evdev_ids[] = {
	{ .driver_info = 1 },	/* Matches all devices */
	{ },			/* Terminating zero entry */
};

MODULE_DEVICE_TABLE(input, evdev_ids);

static struct input_handler evdev_handler = {
	.event		= evdev_event,
	.events		= evdev_events,
	.connect	= evdev_connect,
	.disconnect	= evdev_disconnect,
	.legacy_minors	= true,
	.minor		= EVDEV_MINOR_BASE,
	.name		= "evdev",
	.id_table	= evdev_ids,
};

static int __init evdev_init(void)
{
	return input_register_handler(&evdev_handler);
}

static void __exit evdev_exit(void)
{
	input_unregister_handler(&evdev_handler);
}

module_init(evdev_init);
module_exit(evdev_exit);

MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>");
MODULE_DESCRIPTION("Input driver event char devices");
MODULE_LICENSE("GPL");