1 /* 2 * USB Skeleton driver - 2.2 3 * 4 * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) 5 * 6 * This program is free software; you can redistribute it and/or 7 * modify it under the terms of the GNU General Public License as 8 * published by the Free Software Foundation, version 2. 9 * 10 * This driver is based on the 2.6.3 version of drivers/usb/usb-skeleton.c 11 * but has been rewritten to be easier to read and use. 12 * 13 */ 14 15 #include <linux/kernel.h> 16 #include <linux/errno.h> 17 #include <linux/init.h> 18 #include <linux/slab.h> 19 #include <linux/module.h> 20 #include <linux/kref.h> 21 #include <linux/uaccess.h> 22 #include <linux/usb.h> 23 #include <linux/mutex.h> 24 25 26 /* Define these values to match your devices */ 27 #define USB_SKEL_VENDOR_ID 0xfff0 28 #define USB_SKEL_PRODUCT_ID 0xfff0 29 30 static DEFINE_MUTEX(skel_mutex); 31 32 /* table of devices that work with this driver */ 33 static const struct usb_device_id skel_table[] = { 34 { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) }, 35 { } /* Terminating entry */ 36 }; 37 MODULE_DEVICE_TABLE(usb, skel_table); 38 39 40 /* Get a minor range for your devices from the usb maintainer */ 41 #define USB_SKEL_MINOR_BASE 192 42 43 /* our private defines. if this grows any larger, use your own .h file */ 44 #define MAX_TRANSFER (PAGE_SIZE - 512) 45 /* MAX_TRANSFER is chosen so that the VM is not stressed by 46 allocations > PAGE_SIZE and the number of packets in a page 47 is an integer 512 is the largest possible packet on EHCI */ 48 #define WRITES_IN_FLIGHT 8 49 /* arbitrarily chosen */ 50 51 /* Structure to hold all of our device specific stuff */ 52 struct usb_skel { 53 struct usb_device *udev; /* the usb device for this device */ 54 struct usb_interface *interface; /* the interface for this device */ 55 struct semaphore limit_sem; /* limiting the number of writes in progress */ 56 struct usb_anchor submitted; /* in case we need to retract our submissions */ 57 struct urb *bulk_in_urb; /* the urb to read data with */ 58 unsigned char *bulk_in_buffer; /* the buffer to receive data */ 59 size_t bulk_in_size; /* the size of the receive buffer */ 60 size_t bulk_in_filled; /* number of bytes in the buffer */ 61 size_t bulk_in_copied; /* already copied to user space */ 62 __u8 bulk_in_endpointAddr; /* the address of the bulk in endpoint */ 63 __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */ 64 int errors; /* the last request tanked */ 65 int open_count; /* count the number of openers */ 66 bool ongoing_read; /* a read is going on */ 67 bool processed_urb; /* indicates we haven't processed the urb */ 68 spinlock_t err_lock; /* lock for errors */ 69 struct kref kref; 70 struct mutex io_mutex; /* synchronize I/O with disconnect */ 71 struct completion bulk_in_completion; /* to wait for an ongoing read */ 72 }; 73 #define to_skel_dev(d) container_of(d, struct usb_skel, kref) 74 75 static struct usb_driver skel_driver; 76 static void skel_draw_down(struct usb_skel *dev); 77 78 static void skel_delete(struct kref *kref) 79 { 80 struct usb_skel *dev = to_skel_dev(kref); 81 82 usb_free_urb(dev->bulk_in_urb); 83 usb_put_dev(dev->udev); 84 kfree(dev->bulk_in_buffer); 85 kfree(dev); 86 } 87 88 static int skel_open(struct inode *inode, struct file *file) 89 { 90 struct usb_skel *dev; 91 struct usb_interface *interface; 92 int subminor; 93 int retval = 0; 94 95 subminor = iminor(inode); 96 97 interface = usb_find_interface(&skel_driver, subminor); 98 if (!interface) { 99 err("%s - error, can't find device for minor %d", 100 __func__, subminor); 101 retval = -ENODEV; 102 goto exit; 103 } 104 105 mutex_lock(&skel_mutex); 106 dev = usb_get_intfdata(interface); 107 if (!dev) { 108 mutex_unlock(&skel_mutex); 109 retval = -ENODEV; 110 goto exit; 111 } 112 113 /* increment our usage count for the device */ 114 kref_get(&dev->kref); 115 mutex_unlock(&skel_mutex); 116 117 /* lock the device to allow correctly handling errors 118 * in resumption */ 119 mutex_lock(&dev->io_mutex); 120 if (!dev->interface) { 121 retval = -ENODEV; 122 goto out_err; 123 } 124 125 if (!dev->open_count++) { 126 retval = usb_autopm_get_interface(interface); 127 if (retval) { 128 dev->open_count--; 129 mutex_unlock(&dev->io_mutex); 130 kref_put(&dev->kref, skel_delete); 131 goto exit; 132 } 133 } /* else { //uncomment this block if you want exclusive open 134 retval = -EBUSY; 135 dev->open_count--; 136 mutex_unlock(&dev->io_mutex); 137 kref_put(&dev->kref, skel_delete); 138 goto exit; 139 } */ 140 /* prevent the device from being autosuspended */ 141 142 /* save our object in the file's private structure */ 143 file->private_data = dev; 144 145 out_err: 146 mutex_unlock(&dev->io_mutex); 147 if (retval) 148 kref_put(&dev->kref, skel_delete); 149 150 exit: 151 return retval; 152 } 153 154 static int skel_release(struct inode *inode, struct file *file) 155 { 156 struct usb_skel *dev; 157 158 dev = file->private_data; 159 if (dev == NULL) 160 return -ENODEV; 161 162 /* allow the device to be autosuspended */ 163 mutex_lock(&dev->io_mutex); 164 if (!--dev->open_count && dev->interface) 165 usb_autopm_put_interface(dev->interface); 166 mutex_unlock(&dev->io_mutex); 167 168 /* decrement the count on our device */ 169 kref_put(&dev->kref, skel_delete); 170 return 0; 171 } 172 173 static int skel_flush(struct file *file, fl_owner_t id) 174 { 175 struct usb_skel *dev; 176 int res; 177 178 dev = file->private_data; 179 if (dev == NULL) 180 return -ENODEV; 181 182 /* wait for io to stop */ 183 mutex_lock(&dev->io_mutex); 184 skel_draw_down(dev); 185 186 /* read out errors, leave subsequent opens a clean slate */ 187 spin_lock_irq(&dev->err_lock); 188 res = dev->errors ? (dev->errors == -EPIPE ? -EPIPE : -EIO) : 0; 189 dev->errors = 0; 190 spin_unlock_irq(&dev->err_lock); 191 192 mutex_unlock(&dev->io_mutex); 193 194 return res; 195 } 196 197 static void skel_read_bulk_callback(struct urb *urb) 198 { 199 struct usb_skel *dev; 200 201 dev = urb->context; 202 203 spin_lock(&dev->err_lock); 204 /* sync/async unlink faults aren't errors */ 205 if (urb->status) { 206 if (!(urb->status == -ENOENT || 207 urb->status == -ECONNRESET || 208 urb->status == -ESHUTDOWN)) 209 err("%s - nonzero write bulk status received: %d", 210 __func__, urb->status); 211 212 dev->errors = urb->status; 213 } else { 214 dev->bulk_in_filled = urb->actual_length; 215 } 216 dev->ongoing_read = 0; 217 spin_unlock(&dev->err_lock); 218 219 complete(&dev->bulk_in_completion); 220 } 221 222 static int skel_do_read_io(struct usb_skel *dev, size_t count) 223 { 224 int rv; 225 226 /* prepare a read */ 227 usb_fill_bulk_urb(dev->bulk_in_urb, 228 dev->udev, 229 usb_rcvbulkpipe(dev->udev, 230 dev->bulk_in_endpointAddr), 231 dev->bulk_in_buffer, 232 min(dev->bulk_in_size, count), 233 skel_read_bulk_callback, 234 dev); 235 /* tell everybody to leave the URB alone */ 236 spin_lock_irq(&dev->err_lock); 237 dev->ongoing_read = 1; 238 spin_unlock_irq(&dev->err_lock); 239 240 /* do it */ 241 rv = usb_submit_urb(dev->bulk_in_urb, GFP_KERNEL); 242 if (rv < 0) { 243 err("%s - failed submitting read urb, error %d", 244 __func__, rv); 245 dev->bulk_in_filled = 0; 246 rv = (rv == -ENOMEM) ? rv : -EIO; 247 spin_lock_irq(&dev->err_lock); 248 dev->ongoing_read = 0; 249 spin_unlock_irq(&dev->err_lock); 250 } 251 252 return rv; 253 } 254 255 static ssize_t skel_read(struct file *file, char *buffer, size_t count, 256 loff_t *ppos) 257 { 258 struct usb_skel *dev; 259 int rv; 260 bool ongoing_io; 261 262 dev = file->private_data; 263 264 /* if we cannot read at all, return EOF */ 265 if (!dev->bulk_in_urb || !count) 266 return 0; 267 268 /* no concurrent readers */ 269 rv = mutex_lock_interruptible(&dev->io_mutex); 270 if (rv < 0) 271 return rv; 272 273 if (!dev->interface) { /* disconnect() was called */ 274 rv = -ENODEV; 275 goto exit; 276 } 277 278 /* if IO is under way, we must not touch things */ 279 retry: 280 spin_lock_irq(&dev->err_lock); 281 ongoing_io = dev->ongoing_read; 282 spin_unlock_irq(&dev->err_lock); 283 284 if (ongoing_io) { 285 /* nonblocking IO shall not wait */ 286 if (file->f_flags & O_NONBLOCK) { 287 rv = -EAGAIN; 288 goto exit; 289 } 290 /* 291 * IO may take forever 292 * hence wait in an interruptible state 293 */ 294 rv = wait_for_completion_interruptible(&dev->bulk_in_completion); 295 if (rv < 0) 296 goto exit; 297 /* 298 * by waiting we also semiprocessed the urb 299 * we must finish now 300 */ 301 dev->bulk_in_copied = 0; 302 dev->processed_urb = 1; 303 } 304 305 if (!dev->processed_urb) { 306 /* 307 * the URB hasn't been processed 308 * do it now 309 */ 310 wait_for_completion(&dev->bulk_in_completion); 311 dev->bulk_in_copied = 0; 312 dev->processed_urb = 1; 313 } 314 315 /* errors must be reported */ 316 rv = dev->errors; 317 if (rv < 0) { 318 /* any error is reported once */ 319 dev->errors = 0; 320 /* to preserve notifications about reset */ 321 rv = (rv == -EPIPE) ? rv : -EIO; 322 /* no data to deliver */ 323 dev->bulk_in_filled = 0; 324 /* report it */ 325 goto exit; 326 } 327 328 /* 329 * if the buffer is filled we may satisfy the read 330 * else we need to start IO 331 */ 332 333 if (dev->bulk_in_filled) { 334 /* we had read data */ 335 size_t available = dev->bulk_in_filled - dev->bulk_in_copied; 336 size_t chunk = min(available, count); 337 338 if (!available) { 339 /* 340 * all data has been used 341 * actual IO needs to be done 342 */ 343 rv = skel_do_read_io(dev, count); 344 if (rv < 0) 345 goto exit; 346 else 347 goto retry; 348 } 349 /* 350 * data is available 351 * chunk tells us how much shall be copied 352 */ 353 354 if (copy_to_user(buffer, 355 dev->bulk_in_buffer + dev->bulk_in_copied, 356 chunk)) 357 rv = -EFAULT; 358 else 359 rv = chunk; 360 361 dev->bulk_in_copied += chunk; 362 363 /* 364 * if we are asked for more than we have, 365 * we start IO but don't wait 366 */ 367 if (available < count) 368 skel_do_read_io(dev, count - chunk); 369 } else { 370 /* no data in the buffer */ 371 rv = skel_do_read_io(dev, count); 372 if (rv < 0) 373 goto exit; 374 else if (!(file->f_flags & O_NONBLOCK)) 375 goto retry; 376 rv = -EAGAIN; 377 } 378 exit: 379 mutex_unlock(&dev->io_mutex); 380 return rv; 381 } 382 383 static void skel_write_bulk_callback(struct urb *urb) 384 { 385 struct usb_skel *dev; 386 387 dev = urb->context; 388 389 /* sync/async unlink faults aren't errors */ 390 if (urb->status) { 391 if (!(urb->status == -ENOENT || 392 urb->status == -ECONNRESET || 393 urb->status == -ESHUTDOWN)) 394 err("%s - nonzero write bulk status received: %d", 395 __func__, urb->status); 396 397 spin_lock(&dev->err_lock); 398 dev->errors = urb->status; 399 spin_unlock(&dev->err_lock); 400 } 401 402 /* free up our allocated buffer */ 403 usb_free_coherent(urb->dev, urb->transfer_buffer_length, 404 urb->transfer_buffer, urb->transfer_dma); 405 up(&dev->limit_sem); 406 } 407 408 static ssize_t skel_write(struct file *file, const char *user_buffer, 409 size_t count, loff_t *ppos) 410 { 411 struct usb_skel *dev; 412 int retval = 0; 413 struct urb *urb = NULL; 414 char *buf = NULL; 415 size_t writesize = min(count, (size_t)MAX_TRANSFER); 416 417 dev = file->private_data; 418 419 /* verify that we actually have some data to write */ 420 if (count == 0) 421 goto exit; 422 423 /* 424 * limit the number of URBs in flight to stop a user from using up all 425 * RAM 426 */ 427 if (!(file->f_flags & O_NONBLOCK)) { 428 if (down_interruptible(&dev->limit_sem)) { 429 retval = -ERESTARTSYS; 430 goto exit; 431 } 432 } else { 433 if (down_trylock(&dev->limit_sem)) { 434 retval = -EAGAIN; 435 goto exit; 436 } 437 } 438 439 spin_lock_irq(&dev->err_lock); 440 retval = dev->errors; 441 if (retval < 0) { 442 /* any error is reported once */ 443 dev->errors = 0; 444 /* to preserve notifications about reset */ 445 retval = (retval == -EPIPE) ? retval : -EIO; 446 } 447 spin_unlock_irq(&dev->err_lock); 448 if (retval < 0) 449 goto error; 450 451 /* create a urb, and a buffer for it, and copy the data to the urb */ 452 urb = usb_alloc_urb(0, GFP_KERNEL); 453 if (!urb) { 454 retval = -ENOMEM; 455 goto error; 456 } 457 458 buf = usb_alloc_coherent(dev->udev, writesize, GFP_KERNEL, 459 &urb->transfer_dma); 460 if (!buf) { 461 retval = -ENOMEM; 462 goto error; 463 } 464 465 if (copy_from_user(buf, user_buffer, writesize)) { 466 retval = -EFAULT; 467 goto error; 468 } 469 470 /* this lock makes sure we don't submit URBs to gone devices */ 471 mutex_lock(&dev->io_mutex); 472 if (!dev->interface) { /* disconnect() was called */ 473 mutex_unlock(&dev->io_mutex); 474 retval = -ENODEV; 475 goto error; 476 } 477 478 /* initialize the urb properly */ 479 usb_fill_bulk_urb(urb, dev->udev, 480 usb_sndbulkpipe(dev->udev, dev->bulk_out_endpointAddr), 481 buf, writesize, skel_write_bulk_callback, dev); 482 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; 483 usb_anchor_urb(urb, &dev->submitted); 484 485 /* send the data out the bulk port */ 486 retval = usb_submit_urb(urb, GFP_KERNEL); 487 mutex_unlock(&dev->io_mutex); 488 if (retval) { 489 err("%s - failed submitting write urb, error %d", __func__, 490 retval); 491 goto error_unanchor; 492 } 493 494 /* 495 * release our reference to this urb, the USB core will eventually free 496 * it entirely 497 */ 498 usb_free_urb(urb); 499 500 501 return writesize; 502 503 error_unanchor: 504 usb_unanchor_urb(urb); 505 error: 506 if (urb) { 507 usb_free_coherent(dev->udev, writesize, buf, urb->transfer_dma); 508 usb_free_urb(urb); 509 } 510 up(&dev->limit_sem); 511 512 exit: 513 return retval; 514 } 515 516 static const struct file_operations skel_fops = { 517 .owner = THIS_MODULE, 518 .read = skel_read, 519 .write = skel_write, 520 .open = skel_open, 521 .release = skel_release, 522 .flush = skel_flush, 523 .llseek = noop_llseek, 524 }; 525 526 /* 527 * usb class driver info in order to get a minor number from the usb core, 528 * and to have the device registered with the driver core 529 */ 530 static struct usb_class_driver skel_class = { 531 .name = "skel%d", 532 .fops = &skel_fops, 533 .minor_base = USB_SKEL_MINOR_BASE, 534 }; 535 536 static int skel_probe(struct usb_interface *interface, 537 const struct usb_device_id *id) 538 { 539 struct usb_skel *dev; 540 struct usb_host_interface *iface_desc; 541 struct usb_endpoint_descriptor *endpoint; 542 size_t buffer_size; 543 int i; 544 int retval = -ENOMEM; 545 546 /* allocate memory for our device state and initialize it */ 547 dev = kzalloc(sizeof(*dev), GFP_KERNEL); 548 if (!dev) { 549 err("Out of memory"); 550 goto error; 551 } 552 kref_init(&dev->kref); 553 sema_init(&dev->limit_sem, WRITES_IN_FLIGHT); 554 mutex_init(&dev->io_mutex); 555 spin_lock_init(&dev->err_lock); 556 init_usb_anchor(&dev->submitted); 557 init_completion(&dev->bulk_in_completion); 558 559 dev->udev = usb_get_dev(interface_to_usbdev(interface)); 560 dev->interface = interface; 561 562 /* set up the endpoint information */ 563 /* use only the first bulk-in and bulk-out endpoints */ 564 iface_desc = interface->cur_altsetting; 565 for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { 566 endpoint = &iface_desc->endpoint[i].desc; 567 568 if (!dev->bulk_in_endpointAddr && 569 usb_endpoint_is_bulk_in(endpoint)) { 570 /* we found a bulk in endpoint */ 571 buffer_size = usb_endpoint_maxp(endpoint); 572 dev->bulk_in_size = buffer_size; 573 dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; 574 dev->bulk_in_buffer = kmalloc(buffer_size, GFP_KERNEL); 575 if (!dev->bulk_in_buffer) { 576 err("Could not allocate bulk_in_buffer"); 577 goto error; 578 } 579 dev->bulk_in_urb = usb_alloc_urb(0, GFP_KERNEL); 580 if (!dev->bulk_in_urb) { 581 err("Could not allocate bulk_in_urb"); 582 goto error; 583 } 584 } 585 586 if (!dev->bulk_out_endpointAddr && 587 usb_endpoint_is_bulk_out(endpoint)) { 588 /* we found a bulk out endpoint */ 589 dev->bulk_out_endpointAddr = endpoint->bEndpointAddress; 590 } 591 } 592 if (!(dev->bulk_in_endpointAddr && dev->bulk_out_endpointAddr)) { 593 err("Could not find both bulk-in and bulk-out endpoints"); 594 goto error; 595 } 596 597 /* save our data pointer in this interface device */ 598 usb_set_intfdata(interface, dev); 599 600 /* we can register the device now, as it is ready */ 601 retval = usb_register_dev(interface, &skel_class); 602 if (retval) { 603 /* something prevented us from registering this driver */ 604 err("Not able to get a minor for this device."); 605 usb_set_intfdata(interface, NULL); 606 goto error; 607 } 608 609 /* let the user know what node this device is now attached to */ 610 dev_info(&interface->dev, 611 "USB Skeleton device now attached to USBSkel-%d", 612 interface->minor); 613 return 0; 614 615 error: 616 if (dev) 617 /* this frees allocated memory */ 618 kref_put(&dev->kref, skel_delete); 619 return retval; 620 } 621 622 static void skel_disconnect(struct usb_interface *interface) 623 { 624 struct usb_skel *dev; 625 int minor = interface->minor; 626 627 dev = usb_get_intfdata(interface); 628 629 /* give back our minor */ 630 usb_deregister_dev(interface, &skel_class); 631 632 /* prevent more I/O from starting */ 633 mutex_lock(&dev->io_mutex); 634 dev->interface = NULL; 635 mutex_unlock(&dev->io_mutex); 636 637 usb_kill_anchored_urbs(&dev->submitted); 638 639 mutex_lock(&skel_mutex); 640 usb_set_intfdata(interface, NULL); 641 642 /* decrement our usage count */ 643 kref_put(&dev->kref, skel_delete); 644 mutex_unlock(&skel_mutex); 645 646 dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor); 647 } 648 649 static void skel_draw_down(struct usb_skel *dev) 650 { 651 int time; 652 653 time = usb_wait_anchor_empty_timeout(&dev->submitted, 1000); 654 if (!time) 655 usb_kill_anchored_urbs(&dev->submitted); 656 usb_kill_urb(dev->bulk_in_urb); 657 } 658 659 static int skel_suspend(struct usb_interface *intf, pm_message_t message) 660 { 661 struct usb_skel *dev = usb_get_intfdata(intf); 662 663 if (!dev) 664 return 0; 665 skel_draw_down(dev); 666 return 0; 667 } 668 669 static int skel_resume(struct usb_interface *intf) 670 { 671 return 0; 672 } 673 674 static int skel_pre_reset(struct usb_interface *intf) 675 { 676 struct usb_skel *dev = usb_get_intfdata(intf); 677 678 mutex_lock(&dev->io_mutex); 679 skel_draw_down(dev); 680 681 return 0; 682 } 683 684 static int skel_post_reset(struct usb_interface *intf) 685 { 686 struct usb_skel *dev = usb_get_intfdata(intf); 687 688 /* we are sure no URBs are active - no locking needed */ 689 dev->errors = -EPIPE; 690 mutex_unlock(&dev->io_mutex); 691 692 return 0; 693 } 694 695 static struct usb_driver skel_driver = { 696 .name = "skeleton", 697 .probe = skel_probe, 698 .disconnect = skel_disconnect, 699 .suspend = skel_suspend, 700 .resume = skel_resume, 701 .pre_reset = skel_pre_reset, 702 .post_reset = skel_post_reset, 703 .id_table = skel_table, 704 .supports_autosuspend = 1, 705 }; 706 707 static int __init usb_skel_init(void) 708 { 709 int result; 710 711 /* register this driver with the USB subsystem */ 712 result = usb_register(&skel_driver); 713 if (result) 714 err("usb_register failed. Error number %d", result); 715 716 return result; 717 } 718 719 static void __exit usb_skel_exit(void) 720 { 721 /* deregister this driver with the USB subsystem */ 722 usb_deregister(&skel_driver); 723 } 724 725 module_init(usb_skel_init); 726 module_exit(usb_skel_exit); 727 728 MODULE_LICENSE("GPL"); 729