/*
 * User-space I/O driver support for HID subsystem
 * Copyright (c) 2012 David Herrmann
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/atomic.h>
#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/hid.h>
#include <linux/input.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/uhid.h>
#include <linux/wait.h>

#define UHID_NAME	"uhid"
#define UHID_BUFSIZE	32

struct uhid_device {
	struct mutex devlock;
	bool running;

	__u8 *rd_data;
	uint rd_size;

	struct hid_device *hid;
	struct uhid_event input_buf;

	wait_queue_head_t waitq;
	spinlock_t qlock;
	__u8 head;
	__u8 tail;
	struct uhid_event *outq[UHID_BUFSIZE];

	struct mutex report_lock;
	wait_queue_head_t report_wait;
	atomic_t report_done;
	atomic_t report_id;
	struct uhid_event report_buf;
};

static struct miscdevice uhid_misc;

static void uhid_queue(struct uhid_device *uhid, struct uhid_event *ev)
{
	__u8 newhead;

	newhead = (uhid->head + 1) % UHID_BUFSIZE;

	if (newhead != uhid->tail) {
		uhid->outq[uhid->head] = ev;
		uhid->head = newhead;
		wake_up_interruptible(&uhid->waitq);
	} else {
		hid_warn(uhid->hid, "Output queue is full\n");
		kfree(ev);
	}
}

static int uhid_queue_event(struct uhid_device *uhid, __u32 event)
{
	unsigned long flags;
	struct uhid_event *ev;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = event;

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return 0;
}

static int uhid_hid_start(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_START);
}

static void uhid_hid_stop(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	hid->claimed = 0;
	uhid_queue_event(uhid, UHID_STOP);
}

static int uhid_hid_open(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return uhid_queue_event(uhid, UHID_OPEN);
}

static void uhid_hid_close(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	uhid_queue_event(uhid, UHID_CLOSE);
}

static int uhid_hid_parse(struct hid_device *hid)
{
	struct uhid_device *uhid = hid->driver_data;

	return hid_parse_report(hid, uhid->rd_data, uhid->rd_size);
}
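/*
 * Synchronous report retrieval: uhid_hid_get_raw() queues a UHID_FEATURE
 * request tagged with a fresh report_id and then waits (up to 5 seconds)
 * for user-space to answer with UHID_FEATURE_ANSWER, which
 * uhid_dev_feature_answer() copies into report_buf and signals via
 * report_done/report_wait.
 */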
static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
			    __u8 *buf, size_t count, unsigned char rtype)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 report_type;
	struct uhid_event *ev;
	unsigned long flags;
	int ret;
	size_t uninitialized_var(len);
	struct uhid_feature_answer_req *req;

	if (!uhid->running)
		return -EIO;

	switch (rtype) {
	case HID_FEATURE_REPORT:
		report_type = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		report_type = UHID_OUTPUT_REPORT;
		break;
	case HID_INPUT_REPORT:
		report_type = UHID_INPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	ret = mutex_lock_interruptible(&uhid->report_lock);
	if (ret)
		return ret;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev) {
		ret = -ENOMEM;
		goto unlock;
	}

	spin_lock_irqsave(&uhid->qlock, flags);
	ev->type = UHID_FEATURE;
	ev->u.feature.id = atomic_inc_return(&uhid->report_id);
	ev->u.feature.rnum = rnum;
	ev->u.feature.rtype = report_type;

	atomic_set(&uhid->report_done, 0);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	ret = wait_event_interruptible_timeout(uhid->report_wait,
				atomic_read(&uhid->report_done), 5 * HZ);

	/*
	 * Make sure "uhid->running" is cleared on shutdown before
	 * "uhid->report_done" is set.
	 */
	smp_rmb();
	if (!ret || !uhid->running) {
		ret = -EIO;
	} else if (ret < 0) {
		ret = -ERESTARTSYS;
	} else {
		spin_lock_irqsave(&uhid->qlock, flags);
		req = &uhid->report_buf.u.feature_answer;

		if (req->err) {
			ret = -EIO;
		} else {
			ret = 0;
			len = min(count,
				  min_t(size_t, req->size, UHID_DATA_MAX));
			memcpy(buf, req->data, len);
		}

		spin_unlock_irqrestore(&uhid->qlock, flags);
	}

	atomic_set(&uhid->report_done, 1);

unlock:
	mutex_unlock(&uhid->report_lock);
	return ret ? ret : len;
}

static int uhid_hid_output_raw(struct hid_device *hid, __u8 *buf, size_t count,
			       unsigned char report_type)
{
	struct uhid_device *uhid = hid->driver_data;
	__u8 rtype;
	unsigned long flags;
	struct uhid_event *ev;

	switch (report_type) {
	case HID_FEATURE_REPORT:
		rtype = UHID_FEATURE_REPORT;
		break;
	case HID_OUTPUT_REPORT:
		rtype = UHID_OUTPUT_REPORT;
		break;
	default:
		return -EINVAL;
	}

	if (count < 1 || count > UHID_DATA_MAX)
		return -EINVAL;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return -ENOMEM;

	ev->type = UHID_OUTPUT;
	ev->u.output.size = count;
	ev->u.output.rtype = rtype;
	memcpy(ev->u.output.data, buf, count);

	spin_lock_irqsave(&uhid->qlock, flags);
	uhid_queue(uhid, ev);
	spin_unlock_irqrestore(&uhid->qlock, flags);

	return count;
}

static struct hid_ll_driver uhid_hid_driver = {
	.start = uhid_hid_start,
	.stop = uhid_hid_stop,
	.open = uhid_hid_open,
	.close = uhid_hid_close,
	.parse = uhid_hid_parse,
};

#ifdef CONFIG_COMPAT

/* Apparently we haven't stepped on these rakes enough times yet. */
struct uhid_create_req_compat {
	__u8 name[128];
	__u8 phys[64];
	__u8 uniq[64];

	compat_uptr_t rd_data;
	__u16 rd_size;

	__u16 bus;
	__u32 vendor;
	__u32 product;
	__u32 version;
	__u32 country;
} __attribute__((__packed__));

static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (is_compat_task()) {
		u32 type;

		if (get_user(type, (u32 __user *)buffer))
			return -EFAULT;

		if (type == UHID_CREATE) {
			/*
			 * This is our messed up request with compat pointer.
			 * It is largish (more than 256 bytes) so we better
			 * allocate it from the heap.
			 */
			struct uhid_create_req_compat *compat;

			compat = kmalloc(sizeof(*compat), GFP_KERNEL);
			if (!compat)
				return -ENOMEM;

			buffer += sizeof(type);
			len -= sizeof(type);
			if (copy_from_user(compat, buffer,
					   min(len, sizeof(*compat)))) {
				kfree(compat);
				return -EFAULT;
			}

			/* Shuffle the data over to proper structure */
			event->type = type;

			memcpy(event->u.create.name, compat->name,
				sizeof(compat->name));
			memcpy(event->u.create.phys, compat->phys,
				sizeof(compat->phys));
			memcpy(event->u.create.uniq, compat->uniq,
				sizeof(compat->uniq));

			event->u.create.rd_data = compat_ptr(compat->rd_data);
			event->u.create.rd_size = compat->rd_size;

			event->u.create.bus = compat->bus;
			event->u.create.vendor = compat->vendor;
			event->u.create.product = compat->product;
			event->u.create.version = compat->version;
			event->u.create.country = compat->country;

			kfree(compat);
			return 0;
		}
		/* All others can be copied directly */
	}

	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#else
static int uhid_event_from_user(const char __user *buffer, size_t len,
				struct uhid_event *event)
{
	if (copy_from_user(event, buffer, min(len, sizeof(*event))))
		return -EFAULT;

	return 0;
}
#endif
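/*
 * Device lifecycle: UHID_CREATE copies the report descriptor from the
 * user-supplied rd_data pointer, allocates a hid_device and registers it
 * with the HID core; UHID_DESTROY unregisters the device and wakes any
 * waiter still blocked on a pending feature request.
 */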
static int uhid_dev_create(struct uhid_device *uhid,
			   const struct uhid_event *ev)
{
	struct hid_device *hid;
	int ret;

	if (uhid->running)
		return -EALREADY;

	uhid->rd_size = ev->u.create.rd_size;
	if (uhid->rd_size <= 0 || uhid->rd_size > HID_MAX_DESCRIPTOR_SIZE)
		return -EINVAL;

	uhid->rd_data = kmalloc(uhid->rd_size, GFP_KERNEL);
	if (!uhid->rd_data)
		return -ENOMEM;

	if (copy_from_user(uhid->rd_data, ev->u.create.rd_data,
			   uhid->rd_size)) {
		ret = -EFAULT;
		goto err_free;
	}

	hid = hid_allocate_device();
	if (IS_ERR(hid)) {
		ret = PTR_ERR(hid);
		goto err_free;
	}

	strncpy(hid->name, ev->u.create.name, 127);
	hid->name[127] = 0;
	strncpy(hid->phys, ev->u.create.phys, 63);
	hid->phys[63] = 0;
	strncpy(hid->uniq, ev->u.create.uniq, 63);
	hid->uniq[63] = 0;

	hid->ll_driver = &uhid_hid_driver;
	hid->hid_get_raw_report = uhid_hid_get_raw;
	hid->hid_output_raw_report = uhid_hid_output_raw;
	hid->bus = ev->u.create.bus;
	hid->vendor = ev->u.create.vendor;
	hid->product = ev->u.create.product;
	hid->version = ev->u.create.version;
	hid->country = ev->u.create.country;
	hid->driver_data = uhid;
	hid->dev.parent = uhid_misc.this_device;

	uhid->hid = hid;
	uhid->running = true;

	ret = hid_add_device(hid);
	if (ret) {
		hid_err(hid, "Cannot register HID device\n");
		goto err_hid;
	}

	return 0;

err_hid:
	hid_destroy_device(hid);
	uhid->hid = NULL;
	uhid->running = false;
err_free:
	kfree(uhid->rd_data);
	return ret;
}

static int uhid_dev_destroy(struct uhid_device *uhid)
{
	if (!uhid->running)
		return -EINVAL;

	/* clear "running" before setting "report_done" */
	uhid->running = false;
	smp_wmb();
	atomic_set(&uhid->report_done, 1);
	wake_up_interruptible(&uhid->report_wait);

	hid_destroy_device(uhid->hid);
	kfree(uhid->rd_data);

	return 0;
}

static int uhid_dev_input(struct uhid_device *uhid, struct uhid_event *ev)
{
	if (!uhid->running)
		return -EINVAL;

	hid_input_report(uhid->hid, HID_INPUT_REPORT, ev->u.input.data,
			 min_t(size_t, ev->u.input.size, UHID_DATA_MAX), 0);

	return 0;
}

static int uhid_dev_feature_answer(struct uhid_device *uhid,
				   struct uhid_event *ev)
{
	unsigned long flags;

	if (!uhid->running)
		return -EINVAL;

	spin_lock_irqsave(&uhid->qlock, flags);

	/* id for old report; drop it silently */
	if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
		goto unlock;
	if (atomic_read(&uhid->report_done))
		goto unlock;

	memcpy(&uhid->report_buf, ev, sizeof(*ev));
	atomic_set(&uhid->report_done, 1);
	wake_up_interruptible(&uhid->report_wait);

unlock:
	spin_unlock_irqrestore(&uhid->qlock, flags);
	return 0;
}
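/*
 * Character device interface: each open() of /dev/uhid gets its own
 * uhid_device context. Kernel-to-user events are read() from the outq
 * ring buffer; user-to-kernel requests are write()n as single uhid_event
 * structures and dispatched by type in uhid_char_write().
 */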
static int uhid_char_open(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid;

	uhid = kzalloc(sizeof(*uhid), GFP_KERNEL);
	if (!uhid)
		return -ENOMEM;

	mutex_init(&uhid->devlock);
	mutex_init(&uhid->report_lock);
	spin_lock_init(&uhid->qlock);
	init_waitqueue_head(&uhid->waitq);
	init_waitqueue_head(&uhid->report_wait);
	uhid->running = false;
	atomic_set(&uhid->report_done, 1);

	file->private_data = uhid;
	nonseekable_open(inode, file);

	return 0;
}

static int uhid_char_release(struct inode *inode, struct file *file)
{
	struct uhid_device *uhid = file->private_data;
	unsigned int i;

	uhid_dev_destroy(uhid);

	for (i = 0; i < UHID_BUFSIZE; ++i)
		kfree(uhid->outq[i]);

	kfree(uhid);

	return 0;
}

static ssize_t uhid_char_read(struct file *file, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	unsigned long flags;
	size_t len;

	/* they need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

try_again:
	if (file->f_flags & O_NONBLOCK) {
		if (uhid->head == uhid->tail)
			return -EAGAIN;
	} else {
		ret = wait_event_interruptible(uhid->waitq,
						uhid->head != uhid->tail);
		if (ret)
			return ret;
	}

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	if (uhid->head == uhid->tail) {
		mutex_unlock(&uhid->devlock);
		goto try_again;
	} else {
		len = min(count, sizeof(**uhid->outq));
		if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) {
			ret = -EFAULT;
		} else {
			kfree(uhid->outq[uhid->tail]);
			uhid->outq[uhid->tail] = NULL;

			spin_lock_irqsave(&uhid->qlock, flags);
			uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE;
			spin_unlock_irqrestore(&uhid->qlock, flags);
		}
	}

	mutex_unlock(&uhid->devlock);
	return ret ? ret : len;
}
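/*
 * write() accepts exactly one uhid_event per call; the "type" member
 * selects the handler below and, on success, the full byte count is
 * returned to the caller.
 */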
static ssize_t uhid_char_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct uhid_device *uhid = file->private_data;
	int ret;
	size_t len;

	/* we need at least the "type" member of uhid_event */
	if (count < sizeof(__u32))
		return -EINVAL;

	ret = mutex_lock_interruptible(&uhid->devlock);
	if (ret)
		return ret;

	memset(&uhid->input_buf, 0, sizeof(uhid->input_buf));
	len = min(count, sizeof(uhid->input_buf));

	ret = uhid_event_from_user(buffer, len, &uhid->input_buf);
	if (ret)
		goto unlock;

	switch (uhid->input_buf.type) {
	case UHID_CREATE:
		ret = uhid_dev_create(uhid, &uhid->input_buf);
		break;
	case UHID_DESTROY:
		ret = uhid_dev_destroy(uhid);
		break;
	case UHID_INPUT:
		ret = uhid_dev_input(uhid, &uhid->input_buf);
		break;
	case UHID_FEATURE_ANSWER:
		ret = uhid_dev_feature_answer(uhid, &uhid->input_buf);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

unlock:
	mutex_unlock(&uhid->devlock);

	/* return "count" not "len" to not confuse the caller */
	return ret ? ret : count;
}

static unsigned int uhid_char_poll(struct file *file, poll_table *wait)
{
	struct uhid_device *uhid = file->private_data;

	poll_wait(file, &uhid->waitq, wait);

	if (uhid->head != uhid->tail)
		return POLLIN | POLLRDNORM;

	return 0;
}

static const struct file_operations uhid_fops = {
	.owner		= THIS_MODULE,
	.open		= uhid_char_open,
	.release	= uhid_char_release,
	.read		= uhid_char_read,
	.write		= uhid_char_write,
	.poll		= uhid_char_poll,
	.llseek		= no_llseek,
};

static struct miscdevice uhid_misc = {
	.fops		= &uhid_fops,
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= UHID_NAME,
};

static int __init uhid_init(void)
{
	return misc_register(&uhid_misc);
}

static void __exit uhid_exit(void)
{
	misc_deregister(&uhid_misc);
}

module_init(uhid_init);
module_exit(uhid_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>");
MODULE_DESCRIPTION("User-space I/O driver support for HID subsystem");
MODULE_ALIAS("devname:" UHID_NAME);
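/*
 * Illustrative user-space sketch (not part of the driver): a minimal client
 * of /dev/uhid built on the uhid_event structures handled above. The report
 * descriptor fragment, device name and vendor/product IDs are placeholders,
 * and error handling plus the read() loop for UHID_START/UHID_OPEN/
 * UHID_CLOSE/UHID_FEATURE events is omitted for brevity.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/input.h>
 *	#include <linux/uhid.h>
 *
 *	static __u8 rdesc[] = { 0x05, 0x01 };	// placeholder descriptor fragment
 *
 *	int main(void)
 *	{
 *		struct uhid_event ev;
 *		int fd = open("/dev/uhid", O_RDWR | O_CLOEXEC);
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.type = UHID_CREATE;			// register the device
 *		strcpy((char *)ev.u.create.name, "uhid-example");
 *		ev.u.create.rd_data = rdesc;
 *		ev.u.create.rd_size = sizeof(rdesc);
 *		ev.u.create.bus = BUS_USB;
 *		ev.u.create.vendor = 0x1234;		// placeholder IDs
 *		ev.u.create.product = 0x5678;
 *		write(fd, &ev, sizeof(ev));
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.type = UHID_INPUT;			// inject one input report
 *		ev.u.input.size = 1;
 *		ev.u.input.data[0] = 0x00;
 *		write(fd, &ev, sizeof(ev));
 *
 *		memset(&ev, 0, sizeof(ev));
 *		ev.type = UHID_DESTROY;			// tear the device down
 *		write(fd, &ev, sizeof(ev));
 *		close(fd);
 *		return 0;
 *	}
 */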