/*
 * inode.c -- user mode filesystem api for usb gadget controllers
 *
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */


/* #define VERBOSE_DEBUG */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uts.h>
#include <linux/wait.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/mmu_context.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/refcount.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>

#include <linux/usb/gadgetfs.h>
#include <linux/usb/gadget.h>


/*
 * The gadgetfs API maps each endpoint to a file descriptor so that you
 * can use standard synchronous read/write calls for I/O.  There's some
 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
 * drivers show how this works in practice.  You can also use AIO to
 * eliminate I/O gaps between requests, to help when streaming data.
 *
 * Key parts that must be USB-specific are protocols defining how the
 * read/write operations relate to the hardware state machines.  There
 * are two types of files.  One type is for the device, implementing ep0.
 * The other type is for each IN or OUT endpoint.  In both cases, the
 * user mode driver must configure the hardware before using it.
 *
 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
 *   (by writing configuration and device descriptors).  Afterwards it
 *   may serve as a source of device events, used to handle all control
 *   requests other than basic enumeration.
 *
 * - Then, after a SET_CONFIGURATION control request, ep_config() is
 *   called when each /dev/gadget/ep* file is configured (by writing
 *   endpoint descriptors).  Afterwards these files are used to write()
 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
 *   direction" request is issued (like reading an IN endpoint).
 *
 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
 * not possible on all hardware.  For example, precise fault handling with
 * respect to data left in endpoint fifos after aborted operations; or
 * selective clearing of endpoint halts, to implement SET_INTERFACE.
 */
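/*
 * A minimal userspace sketch of that control-plane loop (illustrative
 * only, not part of this driver; error handling and the descriptor blob
 * written in the second step are omitted -- see the DEVICE INITIALIZATION
 * and ENDPOINT INITIALIZATION comments further down for the blob format):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	int run_ep0(const char *chip_path, const void *descs, size_t len)
 *	{
 *		struct usb_gadgetfs_event event;
 *		int fd = open(chip_path, O_RDWR);	// e.g. "/dev/gadget/$CHIP"
 *
 *		if (fd < 0 || write(fd, descs, len) != (ssize_t) len)
 *			return -1;
 *		for (;;) {
 *			if (read(fd, &event, sizeof event) < (ssize_t) sizeof event)
 *				break;
 *			switch (event.type) {
 *			case GADGETFS_CONNECT:		// note event.u.speed
 *			case GADGETFS_DISCONNECT:
 *			case GADGETFS_SUSPEND:
 *				break;
 *			case GADGETFS_SETUP:		// event.u.setup holds the request;
 *				break;			// answer via read()/write() on fd
 *			}
 *		}
 *		return close(fd);
 *	}
 */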
#define	DRIVER_DESC	"USB Gadget filesystem"
#define	DRIVER_VERSION	"24 Aug 2004"

static const char driver_desc [] = DRIVER_DESC;
static const char shortname [] = "gadgetfs";

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");

static int ep_open(struct inode *, struct file *);


/*----------------------------------------------------------------------*/

#define GADGETFS_MAGIC		0xaee71ee7

/* /dev/gadget/$CHIP represents ep0 and the whole device */
enum ep0_state {
	/* DISABLED is the initial state. */
	STATE_DEV_DISABLED = 0,

	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
	 * ep0/device i/o modes and binding to the controller.  Driver
	 * must always write descriptors to initialize the device, then
	 * the device becomes UNCONNECTED until enumeration.
	 */
	STATE_DEV_OPENED,

	/* From then on, ep0 fd is in either of two basic modes:
	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
	 * - SETUP: read/write will transfer control data and succeed;
	 *   or if "wrong direction", performs protocol stall
	 */
	STATE_DEV_UNCONNECTED,
	STATE_DEV_CONNECTED,
	STATE_DEV_SETUP,

	/* UNBOUND means the driver closed ep0, so the device won't be
	 * accessible again (DEV_DISABLED) until all fds are closed.
	 */
	STATE_DEV_UNBOUND,
};

/* enough for the whole queue: most events invalidate others */
#define	N_EVENT			5

struct dev_data {
	spinlock_t			lock;
	refcount_t			count;
	int				udc_usage;
	enum ep0_state			state;		/* P: lock */
	struct usb_gadgetfs_event	event [N_EVENT];
	unsigned			ev_next;
	struct fasync_struct		*fasync;
	u8				current_config;

	/* drivers reading ep0 MUST handle control requests (SETUP)
	 * reported that way; else the host will time out.
	 */
	unsigned			usermode_setup : 1,
					setup_in : 1,
					setup_can_stall : 1,
					setup_out_ready : 1,
					setup_out_error : 1,
					setup_abort : 1,
					gadget_registered : 1;
	unsigned			setup_wLength;

	/* the rest is basically write-once */
	struct usb_config_descriptor	*config, *hs_config;
	struct usb_device_descriptor	*dev;
	struct usb_request		*req;
	struct usb_gadget		*gadget;
	struct list_head		epfiles;
	void				*buf;
	wait_queue_head_t		wait;
	struct super_block		*sb;
	struct dentry			*dentry;

	/* except this scratch i/o buffer for ep0 */
	u8				rbuf [256];
};

static inline void get_dev (struct dev_data *data)
{
	refcount_inc (&data->count);
}

static void put_dev (struct dev_data *data)
{
	if (likely (!refcount_dec_and_test (&data->count)))
		return;
	/* needs no more cleanup */
	BUG_ON (waitqueue_active (&data->wait));
	kfree (data);
}

static struct dev_data *dev_new (void)
{
	struct dev_data		*dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	dev->state = STATE_DEV_DISABLED;
	refcount_set (&dev->count, 1);
	spin_lock_init (&dev->lock);
	INIT_LIST_HEAD (&dev->epfiles);
	init_waitqueue_head (&dev->wait);
	return dev;
}

/*----------------------------------------------------------------------*/

/* other /dev/gadget/$ENDPOINT files represent endpoints */
enum ep_state {
	STATE_EP_DISABLED = 0,
	STATE_EP_READY,
	STATE_EP_ENABLED,
	STATE_EP_UNBOUND,
};

struct ep_data {
	struct mutex			lock;
	enum ep_state			state;
	refcount_t			count;
	struct dev_data			*dev;
	/* must hold dev->lock before accessing ep or req */
	struct usb_ep			*ep;
	struct usb_request		*req;
	ssize_t				status;
	char				name [16];
	struct usb_endpoint_descriptor	desc, hs_desc;
	struct list_head		epfiles;
	wait_queue_head_t		wait;
	struct dentry			*dentry;
};

static inline void get_ep (struct ep_data *data)
{
	refcount_inc (&data->count);
}

static void put_ep (struct ep_data *data)
{
	if (likely (!refcount_dec_and_test (&data->count)))
		return;
	put_dev (data->dev);
	/* needs no more cleanup */
	BUG_ON (!list_empty (&data->epfiles));
	BUG_ON (waitqueue_active (&data->wait));
	kfree (data);
}

/*----------------------------------------------------------------------*/

/* most "how to use the hardware" policy choices are in userspace:
 * mapping endpoint roles (which the driver needs) to the capabilities
 * which the usb controller has.  most of those capabilities are exposed
 * implicitly, starting with the driver name and then endpoint names.
 */

static const char *CHIP;

/*----------------------------------------------------------------------*/

/* NOTE:  don't use dev_printk calls before binding to the gadget
 * at the end of ep0 configuration, or after unbind.
 */

/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
#define xprintk(d,level,fmt,args...) \
	printk(level "%s: " fmt , shortname , ## args)

#ifdef DEBUG
#define DBG(dev,fmt,args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDEBUG	DBG
#else
#define VDEBUG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#define ERROR(dev,fmt,args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev,fmt,args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)


/*----------------------------------------------------------------------*/

/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
 *
 * After opening, configure non-control endpoints.  Then use normal
 * stream read() and write() requests; and maybe ioctl() to get more
 * precise FIFO status when recovering from cancellation.
 */
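/*
 * A minimal userspace sketch of synchronous endpoint i/o (illustrative
 * only; "ep_in_fd" and "ep_out_fd" stand for endpoint files that were
 * already configured as described under ENDPOINT INITIALIZATION below):
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/usb/gadgetfs.h>
 *
 *	void ep_sync_io(int ep_in_fd, int ep_out_fd)
 *	{
 *		char buf[512];
 *
 *		write(ep_in_fd, buf, sizeof buf);	// send IN data to the host
 *		read(ep_out_fd, buf, sizeof buf);	// collect OUT data
 *		ioctl(ep_out_fd, GADGETFS_FIFO_STATUS);	// bytes left in the fifo
 *		ioctl(ep_out_fd, GADGETFS_FIFO_FLUSH);	// ... and discard them
 *	}
 */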
static void epio_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct ep_data		*epdata = ep->driver_data;

	if (!req->context)
		return;
	if (req->status)
		epdata->status = req->status;
	else
		epdata->status = req->actual;
	complete ((struct completion *)req->context);
}

/* tasklock endpoint, returning when it's connected.
 * still need dev->lock to use epdata->ep.
 */
static int
get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
{
	int	val;

	if (f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&epdata->lock))
			goto nonblock;
		if (epdata->state != STATE_EP_ENABLED &&
		    (!is_write || epdata->state != STATE_EP_READY)) {
			mutex_unlock(&epdata->lock);
nonblock:
			val = -EAGAIN;
		} else
			val = 0;
		return val;
	}

	val = mutex_lock_interruptible(&epdata->lock);
	if (val < 0)
		return val;

	switch (epdata->state) {
	case STATE_EP_ENABLED:
		return 0;
	case STATE_EP_READY:			/* not configured yet */
		if (is_write)
			return 0;
		// FALLTHRU
	case STATE_EP_UNBOUND:			/* clean disconnect */
		break;
	// case STATE_EP_DISABLED:		/* "can't happen" */
	default:				/* error! */
		pr_debug ("%s: ep %p not available, state %d\n",
				shortname, epdata, epdata->state);
	}
	mutex_unlock(&epdata->lock);
	return -ENODEV;
}

static ssize_t
ep_io (struct ep_data *epdata, void *buf, unsigned len)
{
	DECLARE_COMPLETION_ONSTACK (done);
	int value;

	spin_lock_irq (&epdata->dev->lock);
	if (likely (epdata->ep != NULL)) {
		struct usb_request	*req = epdata->req;

		req->context = &done;
		req->complete = epio_complete;
		req->buf = buf;
		req->length = len;
		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
	} else
		value = -ENODEV;
	spin_unlock_irq (&epdata->dev->lock);

	if (likely (value == 0)) {
		value = wait_event_interruptible (done.wait, done.done);
		if (value != 0) {
			spin_lock_irq (&epdata->dev->lock);
			if (likely (epdata->ep != NULL)) {
				DBG (epdata->dev, "%s i/o interrupted\n",
						epdata->name);
				usb_ep_dequeue (epdata->ep, epdata->req);
				spin_unlock_irq (&epdata->dev->lock);

				wait_event (done.wait, done.done);
				if (epdata->status == -ECONNRESET)
					epdata->status = -EINTR;
			} else {
				spin_unlock_irq (&epdata->dev->lock);

				DBG (epdata->dev, "endpoint gone\n");
				epdata->status = -ENODEV;
			}
		}
		return epdata->status;
	}
	return value;
}

static int
ep_release (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = fd->private_data;
	int value;

	value = mutex_lock_interruptible(&data->lock);
	if (value < 0)
		return value;

	/* clean up if this can be reopened */
	if (data->state != STATE_EP_UNBOUND) {
		data->state = STATE_EP_DISABLED;
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
		usb_ep_disable(data->ep);
	}
	mutex_unlock(&data->lock);
	put_ep (data);
	return 0;
}

static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
{
	struct ep_data		*data = fd->private_data;
	int status;

	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
		return status;

	spin_lock_irq (&data->dev->lock);
	if (likely (data->ep != NULL)) {
		switch (code) {
		case GADGETFS_FIFO_STATUS:
			status = usb_ep_fifo_status (data->ep);
			break;
		case GADGETFS_FIFO_FLUSH:
			usb_ep_fifo_flush (data->ep);
			break;
		case GADGETFS_CLEAR_HALT:
			status = usb_ep_clear_halt (data->ep);
			break;
		default:
			status = -ENOTTY;
		}
	} else
		status = -ENODEV;
	spin_unlock_irq (&data->dev->lock);
	mutex_unlock(&data->lock);
	return status;
}

/*----------------------------------------------------------------------*/

/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */

struct kiocb_priv {
	struct usb_request	*req;
	struct ep_data		*epdata;
	struct kiocb		*iocb;
	struct mm_struct	*mm;
	struct work_struct	work;
	void			*buf;
	struct iov_iter		to;
	const void		*to_free;
	unsigned		actual;
};

static int ep_aio_cancel(struct kiocb *iocb)
{
	struct kiocb_priv	*priv = iocb->private;
	struct ep_data		*epdata;
	int value;

	local_irq_disable();
	epdata = priv->epdata;
	// spin_lock(&epdata->dev->lock);
	if (likely(epdata && epdata->ep && priv->req))
		value = usb_ep_dequeue (epdata->ep, priv->req);
	else
		value = -EINVAL;
	// spin_unlock(&epdata->dev->lock);
	local_irq_enable();

	return value;
}
static void ep_user_copy_worker(struct work_struct *work)
{
	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
	struct mm_struct *mm = priv->mm;
	struct kiocb *iocb = priv->iocb;
	size_t ret;

	use_mm(mm);
	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
	unuse_mm(mm);
	if (!ret)
		ret = -EFAULT;

	/* completing the iocb can drop the ctx and mm, don't touch mm after */
	iocb->ki_complete(iocb, ret, ret);

	kfree(priv->buf);
	kfree(priv->to_free);
	kfree(priv);
}

static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct kiocb		*iocb = req->context;
	struct kiocb_priv	*priv = iocb->private;
	struct ep_data		*epdata = priv->epdata;

	/* lock against disconnect (and ideally, cancel) */
	spin_lock(&epdata->dev->lock);
	priv->req = NULL;
	priv->epdata = NULL;

	/* if this was a write or a read returning no data then we
	 * don't need to copy anything to userspace, so we can
	 * complete the aio request immediately.
	 */
	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
		kfree(req->buf);
		kfree(priv->to_free);
		kfree(priv);
		iocb->private = NULL;
		/* aio_complete() reports bytes-transferred _and_ faults */

		iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
				req->status);
	} else {
		/* ep_copy_to_user() won't report both; we hide some faults */
		if (unlikely(0 != req->status))
			DBG(epdata->dev, "%s fault %d len %d\n",
				ep->name, req->status, req->actual);

		priv->buf = req->buf;
		priv->actual = req->actual;
		INIT_WORK(&priv->work, ep_user_copy_worker);
		schedule_work(&priv->work);
	}

	usb_ep_free_request(ep, req);
	spin_unlock(&epdata->dev->lock);
	put_ep(epdata);
}
static ssize_t ep_aio(struct kiocb *iocb,
		      struct kiocb_priv *priv,
		      struct ep_data *epdata,
		      char *buf,
		      size_t len)
{
	struct usb_request *req;
	ssize_t value;

	iocb->private = priv;
	priv->iocb = iocb;

	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
	get_ep(epdata);
	priv->epdata = epdata;
	priv->actual = 0;
	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */

	/* each kiocb is coupled to one usb_request, but we can't
	 * allocate or submit those if the host disconnected.
	 */
	spin_lock_irq(&epdata->dev->lock);
	value = -ENODEV;
	if (unlikely(epdata->ep == NULL))
		goto fail;

	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
	value = -ENOMEM;
	if (unlikely(!req))
		goto fail;

	priv->req = req;
	req->buf = buf;
	req->length = len;
	req->complete = ep_aio_complete;
	req->context = iocb;
	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
	if (unlikely(0 != value)) {
		usb_ep_free_request(epdata->ep, req);
		goto fail;
	}
	spin_unlock_irq(&epdata->dev->lock);
	return -EIOCBQUEUED;

fail:
	spin_unlock_irq(&epdata->dev->lock);
	kfree(priv->to_free);
	kfree(priv);
	put_ep(epdata);
	return value;
}

static ssize_t
ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(to);
	ssize_t value;
	char *buf;

	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
		return value;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}
	if (is_sync_kiocb(iocb)) {
		value = ep_io(epdata, buf, len);
		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
			value = -EFAULT;
	} else {
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (!priv)
			goto fail;
		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
		if (!priv->to_free) {
			kfree(priv);
			goto fail;
		}
		value = ep_aio(iocb, priv, epdata, buf, len);
		if (value == -EIOCBQUEUED)
			buf = NULL;
	}
fail:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}
static ssize_t ep_config(struct ep_data *, const char *, size_t);

static ssize_t
ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(from);
	bool configured;
	ssize_t value;
	char *buf;

	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
		return value;

	configured = epdata->state == STATE_EP_ENABLED;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}

	if (unlikely(!copy_from_iter_full(buf, len, from))) {
		value = -EFAULT;
		goto out;
	}

	if (unlikely(!configured)) {
		value = ep_config(epdata, buf, len);
	} else if (is_sync_kiocb(iocb)) {
		value = ep_io(epdata, buf, len);
	} else {
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (priv) {
			value = ep_aio(iocb, priv, epdata, buf, len);
			if (value == -EIOCBQUEUED)
				buf = NULL;
		}
	}
out:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}

/*----------------------------------------------------------------------*/

/* used after endpoint configuration */
static const struct file_operations ep_io_operations = {
	.owner =	THIS_MODULE,

	.open =		ep_open,
	.release =	ep_release,
	.llseek =	no_llseek,
	.unlocked_ioctl = ep_ioctl,
	.read_iter =	ep_read_iter,
	.write_iter =	ep_write_iter,
};

/* ENDPOINT INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the endpoint configuration, configuring
 * the controller to process bulk, interrupt, or isochronous transfers
 * at the right maxpacket size, and so on.
 *
 * The descriptors are message type 1, identified by a host order u32
 * at the beginning of what's written.  Descriptor order is: full/low
 * speed descriptor, then optional high speed descriptor.
 */
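/*
 * A minimal userspace sketch of that configuration write (illustrative
 * only; "fs_desc" and "hs_desc" stand for whatever endpoint descriptors
 * the gadget driver uses, and error handling is omitted):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	int configure_ep(int fd, const struct usb_endpoint_descriptor *fs_desc,
 *			 const struct usb_endpoint_descriptor *hs_desc)
 *	{
 *		char buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *		unsigned len = 4;
 *		__u32 tag = 1;				// message type, host order
 *
 *		memcpy(buf, &tag, 4);
 *		memcpy(buf + len, fs_desc, USB_DT_ENDPOINT_SIZE);
 *		len += USB_DT_ENDPOINT_SIZE;
 *		if (hs_desc) {				// optional high speed descriptor
 *			memcpy(buf + len, hs_desc, USB_DT_ENDPOINT_SIZE);
 *			len += USB_DT_ENDPOINT_SIZE;
 *		}
 *		return write(fd, buf, len) == (ssize_t) len ? 0 : -1;
 *	}
 */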
static ssize_t
ep_config (struct ep_data *data, const char *buf, size_t len)
{
	struct usb_ep *ep;
	u32	tag;
	int	value, length = len;

	if (data->state != STATE_EP_READY) {
		value = -EL2HLT;
		goto fail;
	}

	value = len;
	if (len < USB_DT_ENDPOINT_SIZE + 4)
		goto fail0;

	/* we might need to change message format someday */
	memcpy(&tag, buf, 4);
	if (tag != 1) {
		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
		goto fail0;
	}
	buf += 4;
	len -= 4;

	/* NOTE:  audio endpoint extensions not accepted here;
	 * just don't include the extra bytes.
	 */

	/* full/low speed descriptor, then high speed */
	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
		goto fail0;
	if (len != USB_DT_ENDPOINT_SIZE) {
		if (len != 2 * USB_DT_ENDPOINT_SIZE)
			goto fail0;
		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
			USB_DT_ENDPOINT_SIZE);
		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
				|| data->hs_desc.bDescriptorType
					!= USB_DT_ENDPOINT) {
			DBG(data->dev, "config %s, bad hs length or type\n",
					data->name);
			goto fail0;
		}
	}

	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND) {
		value = -ENOENT;
		goto gone;
	} else {
		ep = data->ep;
		if (ep == NULL) {
			value = -ENODEV;
			goto gone;
		}
	}
	switch (data->dev->gadget->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		ep->desc = &data->desc;
		break;
	case USB_SPEED_HIGH:
		/* fails if caller didn't provide that descriptor... */
		ep->desc = &data->hs_desc;
		break;
	default:
		DBG(data->dev, "unconnected, %s init abandoned\n",
				data->name);
		value = -EINVAL;
		goto gone;
	}
	value = usb_ep_enable(ep);
	if (value == 0) {
		data->state = STATE_EP_ENABLED;
		value = length;
	}
gone:
	spin_unlock_irq (&data->dev->lock);
	if (value < 0) {
fail:
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
	}
	return value;
fail0:
	value = -EINVAL;
	goto fail;
}

static int
ep_open (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = inode->i_private;
	int			value = -EBUSY;

	if (mutex_lock_interruptible(&data->lock) != 0)
		return -EINTR;
	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND)
		value = -ENOENT;
	else if (data->state == STATE_EP_DISABLED) {
		value = 0;
		data->state = STATE_EP_READY;
		get_ep (data);
		fd->private_data = data;
		VDEBUG (data->dev, "%s ready\n", data->name);
	} else
		DBG (data->dev, "%s state %d\n",
			data->name, data->state);
	spin_unlock_irq (&data->dev->lock);
	mutex_unlock(&data->lock);
	return value;
}

/*----------------------------------------------------------------------*/

/* EP0 IMPLEMENTATION can be partly in userspace.
 *
 * Drivers that use this facility receive various events, including
 * control requests the kernel doesn't handle.  Drivers that don't
 * use this facility may be too simple-minded for real applications.
 */
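/*
 * A minimal userspace sketch of answering one delegated control request
 * (illustrative only; "ep0_fd" is the /dev/gadget/$CHIP file and
 * "setup" came from a GADGETFS_SETUP event read as in the sketch near
 * the top of this file; <linux/usb/ch9.h> provides struct usb_ctrlrequest
 * and USB_DIR_IN):
 *
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	void handle_setup(int ep0_fd, const struct usb_ctrlrequest *setup)
 *	{
 *		unsigned len = setup->wLength;	// le16; little-endian host assumed
 *		char data[256];
 *
 *		if (len > sizeof data)
 *			len = sizeof data;
 *		if (setup->bRequestType & USB_DIR_IN) {
 *			// IN data stage: write() the response; writing fewer
 *			// than wLength bytes ends the transfer early
 *			write(ep0_fd, data, len);
 *		} else if (len) {
 *			// OUT data stage: read() collects the host's data
 *			read(ep0_fd, data, len);
 *		} else {
 *			// no data stage: a zero length read() acks the request
 *			read(ep0_fd, data, 0);
 *		}
 *		// i/o in the "wrong direction" stalls the request instead
 *	}
 */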
static inline void ep0_readable (struct dev_data *dev)
{
	wake_up (&dev->wait);
	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
}

static void clean_req (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;

	if (req->buf != dev->rbuf) {
		kfree(req->buf);
		req->buf = dev->rbuf;
	}
	req->complete = epio_complete;
	dev->setup_out_ready = 0;
}

static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;
	unsigned long		flags;
	int			free = 1;

	/* for control OUT, data must still get to userspace */
	spin_lock_irqsave(&dev->lock, flags);
	if (!dev->setup_in) {
		dev->setup_out_error = (req->status != 0);
		if (!dev->setup_out_error)
			free = 0;
		dev->setup_out_ready = 1;
		ep0_readable (dev);
	}

	/* clean up as appropriate */
	if (free && req->buf != &dev->rbuf)
		clean_req (ep, req);
	req->complete = epio_complete;
	spin_unlock_irqrestore(&dev->lock, flags);
}

static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
{
	struct dev_data	*dev = ep->driver_data;

	if (dev->setup_out_ready) {
		DBG (dev, "ep0 request busy!\n");
		return -EBUSY;
	}
	if (len > sizeof (dev->rbuf))
		req->buf = kmalloc(len, GFP_ATOMIC);
	if (req->buf == NULL) {
		req->buf = dev->rbuf;
		return -ENOMEM;
	}
	req->complete = ep0_complete;
	req->length = len;
	req->zero = 0;
	return 0;
}

static ssize_t
ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data			*dev = fd->private_data;
	ssize_t				retval;
	enum ep0_state			state;

	spin_lock_irq (&dev->lock);
	if (dev->state <= STATE_DEV_OPENED) {
		retval = -EINVAL;
		goto done;
	}

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;
		goto done;
	}

	/* control DATA stage */
	if ((state = dev->state) == STATE_DEV_SETUP) {

		if (dev->setup_in) {		/* stall IN */
			VDEBUG(dev, "ep0in stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;

		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
			struct usb_ep		*ep = dev->gadget->ep0;
			struct usb_request	*req = dev->req;

			if ((retval = setup_req (ep, req, 0)) == 0) {
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				retval = usb_ep_queue (ep, req, GFP_KERNEL);
				spin_lock_irq (&dev->lock);
				--dev->udc_usage;
			}
			dev->state = STATE_DEV_CONNECTED;

			/* assume that was SET_CONFIGURATION */
			if (dev->current_config) {
				unsigned power;

				if (gadget_is_dualspeed(dev->gadget)
						&& (dev->gadget->speed
							== USB_SPEED_HIGH))
					power = dev->hs_config->bMaxPower;
				else
					power = dev->config->bMaxPower;
				usb_gadget_vbus_draw(dev->gadget, 2 * power);
			}

		} else {			/* collect OUT data */
			if ((fd->f_flags & O_NONBLOCK) != 0
					&& !dev->setup_out_ready) {
				retval = -EAGAIN;
				goto done;
			}
			spin_unlock_irq (&dev->lock);
			retval = wait_event_interruptible (dev->wait,
					dev->setup_out_ready != 0);

			/* FIXME state could change from under us */
			spin_lock_irq (&dev->lock);
			if (retval)
				goto done;

			if (dev->state != STATE_DEV_SETUP) {
				retval = -ECANCELED;
				goto done;
			}
			dev->state = STATE_DEV_CONNECTED;

			if (dev->setup_out_error)
				retval = -EIO;
			else {
				len = min (len, (size_t)dev->req->actual);
				++dev->udc_usage;
				spin_unlock_irq(&dev->lock);
				if (copy_to_user (buf, dev->req->buf, len))
					retval = -EFAULT;
				else
					retval = len;
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				clean_req (dev->gadget->ep0, dev->req);
				/* NOTE userspace can't yet choose to stall */
			}
		}
		goto done;
	}

	/* else normal: return event data */
	if (len < sizeof dev->event [0]) {
		retval = -EINVAL;
		goto done;
	}
	len -= len % sizeof (struct usb_gadgetfs_event);
	dev->usermode_setup = 1;

scan:
	/* return queued events right away */
	if (dev->ev_next != 0) {
		unsigned		i, n;

		n = len / sizeof (struct usb_gadgetfs_event);
		if (dev->ev_next < n)
			n = dev->ev_next;

		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
		for (i = 0; i < n; i++) {
			if (dev->event [i].type == GADGETFS_SETUP) {
				dev->state = STATE_DEV_SETUP;
				n = i + 1;
				break;
			}
		}
		spin_unlock_irq (&dev->lock);
		len = n * sizeof (struct usb_gadgetfs_event);
		if (copy_to_user (buf, &dev->event, len))
			retval = -EFAULT;
		else
			retval = len;
		if (len > 0) {
			/* NOTE this doesn't guard against broken drivers;
			 * concurrent ep0 readers may lose events.
			 */
			spin_lock_irq (&dev->lock);
			if (dev->ev_next > n) {
				memmove(&dev->event[0], &dev->event[n],
					sizeof (struct usb_gadgetfs_event)
						* (dev->ev_next - n));
			}
			dev->ev_next -= n;
			spin_unlock_irq (&dev->lock);
		}
		return retval;
	}
	if (fd->f_flags & O_NONBLOCK) {
		retval = -EAGAIN;
		goto done;
	}

	switch (state) {
	default:
		DBG (dev, "fail %s, state %d\n", __func__, state);
		retval = -ESRCH;
		break;
	case STATE_DEV_UNCONNECTED:
	case STATE_DEV_CONNECTED:
		spin_unlock_irq (&dev->lock);
		DBG (dev, "%s wait\n", __func__);

		/* wait for events */
		retval = wait_event_interruptible (dev->wait,
				dev->ev_next != 0);
		if (retval < 0)
			return retval;
		spin_lock_irq (&dev->lock);
		goto scan;
	}

done:
	spin_unlock_irq (&dev->lock);
	return retval;
}

static struct usb_gadgetfs_event *
next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
{
	struct usb_gadgetfs_event	*event;
	unsigned			i;

	switch (type) {
	/* these events purge the queue */
	case GADGETFS_DISCONNECT:
		if (dev->state == STATE_DEV_SETUP)
			dev->setup_abort = 1;
		// FALL THROUGH
	case GADGETFS_CONNECT:
		dev->ev_next = 0;
		break;
	case GADGETFS_SETUP:		/* previous request timed out */
	case GADGETFS_SUSPEND:		/* same effect */
		/* these events can't be repeated */
		for (i = 0; i != dev->ev_next; i++) {
			if (dev->event [i].type != type)
				continue;
			DBG(dev, "discard old event[%d] %d\n", i, type);
			dev->ev_next--;
			if (i == dev->ev_next)
				break;
			/* indices start at zero, for simplicity */
			memmove (&dev->event [i], &dev->event [i + 1],
				sizeof (struct usb_gadgetfs_event)
					* (dev->ev_next - i));
		}
		break;
	default:
		BUG ();
	}
	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
	event = &dev->event [dev->ev_next++];
	BUG_ON (dev->ev_next > N_EVENT);
	memset (event, 0, sizeof *event);
	event->type = type;
	return event;
}

static ssize_t
ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data		*dev = fd->private_data;
	ssize_t			retval = -ESRCH;

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;

	/* data and/or status stage for control request */
	} else if (dev->state == STATE_DEV_SETUP) {

		len = min_t(size_t, len, dev->setup_wLength);
		if (dev->setup_in) {
			retval = setup_req (dev->gadget->ep0, dev->req, len);
			if (retval == 0) {
				dev->state = STATE_DEV_CONNECTED;
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				if (copy_from_user (dev->req->buf, buf, len))
					retval = -EFAULT;
				else {
					if (len < dev->setup_wLength)
						dev->req->zero = 1;
					retval = usb_ep_queue (
						dev->gadget->ep0, dev->req,
						GFP_KERNEL);
				}
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				if (retval < 0) {
					clean_req (dev->gadget->ep0, dev->req);
				} else
					retval = len;

				return retval;
			}

		/* can stall some OUT transfers */
		} else if (dev->setup_can_stall) {
			VDEBUG(dev, "ep0out stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;
		} else {
			DBG(dev, "bogus ep0out stall!\n");
		}
	} else
		DBG (dev, "fail %s, state %d\n", __func__, dev->state);

	return retval;
}

static int
ep0_fasync (int f, struct file *fd, int on)
{
	struct dev_data		*dev = fd->private_data;
	// caller must F_SETOWN before signal delivery happens
	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
	return fasync_helper (f, fd, on, &dev->fasync);
}

static struct usb_gadget_driver gadgetfs_driver;

static int
dev_release (struct inode *inode, struct file *fd)
{
	struct dev_data		*dev = fd->private_data;

	/* closing ep0 === shutdown all */

	if (dev->gadget_registered) {
		usb_gadget_unregister_driver (&gadgetfs_driver);
		dev->gadget_registered = false;
	}

	/* at this point "good" hardware has disconnected the
	 * device from USB; the host won't see it any more.
	 * alternatively, all host requests will time out.
	 */

	kfree (dev->buf);
	dev->buf = NULL;

	/* other endpoints were all decoupled from this device */
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_DISABLED;
	spin_unlock_irq(&dev->lock);

	put_dev (dev);
	return 0;
}

static unsigned int
ep0_poll (struct file *fd, poll_table *wait)
{
	struct dev_data		*dev = fd->private_data;
	int			mask = 0;

	if (dev->state <= STATE_DEV_OPENED)
		return DEFAULT_POLLMASK;

	poll_wait(fd, &dev->wait, wait);

	spin_lock_irq (&dev->lock);

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		mask = POLLHUP;
		goto out;
	}

	if (dev->state == STATE_DEV_SETUP) {
		if (dev->setup_in || dev->setup_can_stall)
			mask = POLLOUT;
	} else {
		if (dev->ev_next != 0)
			mask = POLLIN;
	}
out:
	spin_unlock_irq(&dev->lock);
	return mask;
}

static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
{
	struct dev_data		*dev = fd->private_data;
	struct usb_gadget	*gadget = dev->gadget;
	long ret = -ENOTTY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_OPENED ||
			dev->state == STATE_DEV_UNBOUND) {
		/* Not bound to a UDC */
	} else if (gadget->ops->ioctl) {
		++dev->udc_usage;
		spin_unlock_irq(&dev->lock);

		ret = gadget->ops->ioctl (gadget, code, value);

		spin_lock_irq(&dev->lock);
		--dev->udc_usage;
	}
	spin_unlock_irq(&dev->lock);

	return ret;
}

/*----------------------------------------------------------------------*/

/* The in-kernel gadget driver handles most ep0 issues, in particular
 * enumerating the single configuration (as provided from user space).
 *
 * Unrecognized ep0 requests may be handled in user space.
 */

static void make_qualifier (struct dev_data *dev)
{
	struct usb_qualifier_descriptor		qual;
	struct usb_device_descriptor		*desc;

	qual.bLength = sizeof qual;
	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
	qual.bcdUSB = cpu_to_le16 (0x0200);

	desc = dev->dev;
	qual.bDeviceClass = desc->bDeviceClass;
	qual.bDeviceSubClass = desc->bDeviceSubClass;
	qual.bDeviceProtocol = desc->bDeviceProtocol;

	/* assumes ep0 uses the same value for both speeds ... */
	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;

	qual.bNumConfigurations = 1;
	qual.bRESERVED = 0;

	memcpy (dev->rbuf, &qual, sizeof qual);
}

static int
config_buf (struct dev_data *dev, u8 type, unsigned index)
{
	int		len;
	int		hs = 0;

	/* only one configuration */
	if (index > 0)
		return -EINVAL;

	if (gadget_is_dualspeed(dev->gadget)) {
		hs = (dev->gadget->speed == USB_SPEED_HIGH);
		if (type == USB_DT_OTHER_SPEED_CONFIG)
			hs = !hs;
	}
	if (hs) {
		dev->req->buf = dev->hs_config;
		len = le16_to_cpu(dev->hs_config->wTotalLength);
	} else {
		dev->req->buf = dev->config;
		len = le16_to_cpu(dev->config->wTotalLength);
	}
	((u8 *)dev->req->buf) [1] = type;
	return len;
}

static int
gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
	struct dev_data			*dev = get_gadget_data (gadget);
	struct usb_request		*req = dev->req;
	int				value = -EOPNOTSUPP;
	struct usb_gadgetfs_event	*event;
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);

	spin_lock (&dev->lock);
	dev->setup_abort = 0;
	if (dev->state == STATE_DEV_UNCONNECTED) {
		if (gadget_is_dualspeed(gadget)
				&& gadget->speed == USB_SPEED_HIGH
				&& dev->hs_config == NULL) {
			spin_unlock(&dev->lock);
			ERROR (dev, "no high speed config??\n");
			return -EINVAL;
		}

		dev->state = STATE_DEV_CONNECTED;

		INFO (dev, "connected\n");
		event = next_event (dev, GADGETFS_CONNECT);
		event->u.speed = gadget->speed;
		ep0_readable (dev);

	/* host may have given up waiting for response.  we can miss control
	 * requests handled lower down (device/endpoint status and features);
	 * then ep0_{read,write} will report the wrong status.  controller
	 * driver will have aborted pending i/o.
	 */
	} else if (dev->state == STATE_DEV_SETUP)
		dev->setup_abort = 1;

	req->buf = dev->rbuf;
	req->context = NULL;
	value = -EOPNOTSUPP;
	switch (ctrl->bRequest) {

	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unrecognized;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			value = min (w_length, (u16) sizeof *dev->dev);
			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
			req->buf = dev->dev;
			break;
		case USB_DT_DEVICE_QUALIFIER:
			if (!dev->hs_config)
				break;
			value = min (w_length, (u16)
				sizeof (struct usb_qualifier_descriptor));
			make_qualifier (dev);
			break;
		case USB_DT_OTHER_SPEED_CONFIG:
			// FALLTHROUGH
		case USB_DT_CONFIG:
			value = config_buf (dev,
					w_value >> 8,
					w_value & 0xff);
			if (value >= 0)
				value = min (w_length, (u16) value);
			break;
		case USB_DT_STRING:
			goto unrecognized;

		default:		// all others are errors
			break;
		}
		break;

	/* currently one config, two speeds */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != 0)
			goto unrecognized;
		if (0 == (u8) w_value) {
			value = 0;
			dev->current_config = 0;
			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
			// user mode expected to disable endpoints
		} else {
			u8	config, power;

			if (gadget_is_dualspeed(gadget)
					&& gadget->speed == USB_SPEED_HIGH) {
				config = dev->hs_config->bConfigurationValue;
				power = dev->hs_config->bMaxPower;
			} else {
				config = dev->config->bConfigurationValue;
				power = dev->config->bMaxPower;
			}

			if (config == (u8) w_value) {
				value = 0;
				dev->current_config = config;
				usb_gadget_vbus_draw(gadget, 2 * power);
			}
		}

		/* report SET_CONFIGURATION like any other control request,
		 * except that usermode may not stall this.  the next
		 * request mustn't be allowed to start until this finishes:
		 * endpoints and threads set up, etc.
		 *
		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
		 * has bad/racy automagic that prevents synchronizing here.
		 * even kernel mode drivers often miss them.
		 */
		if (value == 0) {
			INFO (dev, "configuration #%d\n", dev->current_config);
			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
			if (dev->usermode_setup) {
				dev->setup_can_stall = 0;
				goto delegate;
			}
		}
		break;

#ifndef	CONFIG_USB_PXA25X
	/* PXA automagically handles this request too */
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != 0x80)
			goto unrecognized;
		*(u8 *)req->buf = dev->current_config;
		value = min (w_length, (u16) 1);
		break;
#endif

	default:
unrecognized:
		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
			dev->usermode_setup ? "delegate" : "fail",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, le16_to_cpu(ctrl->wIndex), w_length);

		/* if there's an ep0 reader, don't stall */
		if (dev->usermode_setup) {
			dev->setup_can_stall = 1;
delegate:
			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
						? 1 : 0;
			dev->setup_wLength = w_length;
			dev->setup_out_ready = 0;
			dev->setup_out_error = 0;
			value = 0;

			/* read DATA stage for OUT right away */
			if (unlikely (!dev->setup_in && w_length)) {
				value = setup_req (gadget->ep0, dev->req,
							w_length);
				if (value < 0)
					break;

				++dev->udc_usage;
				spin_unlock (&dev->lock);
				value = usb_ep_queue (gadget->ep0, dev->req,
							GFP_KERNEL);
				spin_lock (&dev->lock);
				--dev->udc_usage;
				if (value < 0) {
					clean_req (gadget->ep0, dev->req);
					break;
				}

				/* we can't currently stall these */
				dev->setup_can_stall = 0;
			}

			/* state changes when reader collects event */
			event = next_event (dev, GADGETFS_SETUP);
			event->u.setup = *ctrl;
			ep0_readable (dev);
			spin_unlock (&dev->lock);
			return 0;
		}
	}

	/* proceed with data transfer and status phases? */
	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
		req->length = value;
		req->zero = value < w_length;

		++dev->udc_usage;
		spin_unlock (&dev->lock);
		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
		spin_lock(&dev->lock);
		--dev->udc_usage;
		spin_unlock(&dev->lock);
		if (value < 0) {
			DBG (dev, "ep_queue --> %d\n", value);
			req->status = 0;
		}
		return value;
	}

	/* device stalls when value < 0 */
	spin_unlock (&dev->lock);
	return value;
}

static void destroy_ep_files (struct dev_data *dev)
{
	DBG (dev, "%s %d\n", __func__, dev->state);

	/* dev->state must prevent interference */
	spin_lock_irq (&dev->lock);
	while (!list_empty(&dev->epfiles)) {
		struct ep_data	*ep;
		struct inode	*parent;
		struct dentry	*dentry;

		/* break link to FS */
		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
		list_del_init (&ep->epfiles);
		spin_unlock_irq (&dev->lock);

		dentry = ep->dentry;
		ep->dentry = NULL;
		parent = d_inode(dentry->d_parent);

		/* break link to controller */
		mutex_lock(&ep->lock);
		if (ep->state == STATE_EP_ENABLED)
			(void) usb_ep_disable (ep->ep);
		ep->state = STATE_EP_UNBOUND;
		usb_ep_free_request (ep->ep, ep->req);
		ep->ep = NULL;
		mutex_unlock(&ep->lock);

		wake_up (&ep->wait);
		put_ep (ep);

		/* break link to dcache */
		inode_lock(parent);
		d_delete (dentry);
		dput (dentry);
		inode_unlock(parent);

		spin_lock_irq (&dev->lock);
	}
	spin_unlock_irq (&dev->lock);
}


static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
		void *data, const struct file_operations *fops);

static int activate_ep_files (struct dev_data *dev)
{
	struct usb_ep	*ep;
	struct ep_data	*data;

	gadget_for_each_ep (ep, dev->gadget) {

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto enomem0;
		data->state = STATE_EP_DISABLED;
		mutex_init(&data->lock);
		init_waitqueue_head (&data->wait);

		strncpy (data->name, ep->name, sizeof (data->name) - 1);
		refcount_set (&data->count, 1);
		data->dev = dev;
		get_dev (dev);

		data->ep = ep;
		ep->driver_data = data;

		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
		if (!data->req)
			goto enomem1;

		data->dentry = gadgetfs_create_file (dev->sb, data->name,
				data, &ep_io_operations);
		if (!data->dentry)
			goto enomem2;
		list_add_tail (&data->epfiles, &dev->epfiles);
	}
	return 0;

enomem2:
	usb_ep_free_request (ep, data->req);
enomem1:
	put_dev (dev);
	kfree (data);
enomem0:
	DBG (dev, "%s enomem\n", __func__);
	destroy_ep_files (dev);
	return -ENOMEM;
}

static void
gadgetfs_unbind (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);

	DBG (dev, "%s\n", __func__);

	spin_lock_irq (&dev->lock);
	dev->state = STATE_DEV_UNBOUND;
	while (dev->udc_usage > 0) {
		spin_unlock_irq(&dev->lock);
		usleep_range(1000, 2000);
		spin_lock_irq(&dev->lock);
	}
	spin_unlock_irq (&dev->lock);

	destroy_ep_files (dev);
	gadget->ep0->driver_data = NULL;
	set_gadget_data (gadget, NULL);

	/* we've already been disconnected ... no i/o is active */
	if (dev->req)
		usb_ep_free_request (gadget->ep0, dev->req);
	DBG (dev, "%s done\n", __func__);
	put_dev (dev);
}

static struct dev_data		*the_device;

static int gadgetfs_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct dev_data		*dev = the_device;

	if (!dev)
		return -ESRCH;
	if (0 != strcmp (CHIP, gadget->name)) {
		pr_err("%s expected %s controller not %s\n",
			shortname, CHIP, gadget->name);
		return -ENODEV;
	}

	set_gadget_data (gadget, dev);
	dev->gadget = gadget;
	gadget->ep0->driver_data = dev;

	/* preallocate control response and buffer */
	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
	if (!dev->req)
		goto enomem;
	dev->req->context = NULL;
	dev->req->complete = epio_complete;

	if (activate_ep_files (dev) < 0)
		goto enomem;

	INFO (dev, "bound to %s driver\n", gadget->name);
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_UNCONNECTED;
	spin_unlock_irq(&dev->lock);
	get_dev (dev);
	return 0;

enomem:
	gadgetfs_unbind (gadget);
	return -ENOMEM;
}

static void
gadgetfs_disconnect (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);
	unsigned long		flags;

	spin_lock_irqsave (&dev->lock, flags);
	if (dev->state == STATE_DEV_UNCONNECTED)
		goto exit;
	dev->state = STATE_DEV_UNCONNECTED;

	INFO (dev, "disconnected\n");
	next_event (dev, GADGETFS_DISCONNECT);
	ep0_readable (dev);
exit:
	spin_unlock_irqrestore (&dev->lock, flags);
}

static void
gadgetfs_suspend (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);
	unsigned long		flags;

	INFO (dev, "suspended from state %d\n", dev->state);
	spin_lock_irqsave(&dev->lock, flags);
	switch (dev->state) {
	case STATE_DEV_SETUP:		// VERY odd... host died??
	case STATE_DEV_CONNECTED:
	case STATE_DEV_UNCONNECTED:
		next_event (dev, GADGETFS_SUSPEND);
		ep0_readable (dev);
		/* FALLTHROUGH */
	default:
		break;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

static struct usb_gadget_driver gadgetfs_driver = {
	.function	= (char *) driver_desc,
	.bind		= gadgetfs_bind,
	.unbind		= gadgetfs_unbind,
	.setup		= gadgetfs_setup,
	.reset		= gadgetfs_disconnect,
	.disconnect	= gadgetfs_disconnect,
	.suspend	= gadgetfs_suspend,

	.driver	= {
		.name		= (char *) shortname,
	},
};

/*----------------------------------------------------------------------*/
/* DEVICE INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the device configuration, so the kernel can
 * bind to the controller ... guaranteeing it can handle enumeration
 * at all necessary speeds.  Descriptor order is:
 *
 * . message tag (u32, host order) ... for now, must be zero; it
 *   would change to support features like multi-config devices
 * . full/low speed config ... all wTotalLength bytes (with interface,
 *   class, altsetting, endpoint, and other descriptors)
 * . high speed config ... all descriptors, for high speed operation;
 *   this one's optional except for high-speed hardware
 * . device descriptor
 *
 * Endpoints are not yet enabled.  Drivers must wait until device
 * configuration and interface altsetting changes create
 * the need to configure (or unconfigure) them.
 *
 * After initialization, the device stays active for as long as that
 * $CHIP file is open.  Events must then be read from that descriptor,
 * such as configuration notifications.
 */
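/*
 * A minimal userspace sketch of that initialization write (illustrative
 * only; "config", "config_len", "hs_config", "hs_config_len" and "device"
 * stand for descriptor blobs the gadget driver has already built, and
 * error handling is omitted):
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <linux/usb/ch9.h>
 *
 *	int configure_device(int fd,
 *			     const void *config, size_t config_len,
 *			     const void *hs_config, size_t hs_config_len,
 *			     const struct usb_device_descriptor *device)
 *	{
 *		char buf[4096];
 *		size_t len = 4;
 *		__u32 tag = 0;				// message tag, host order
 *
 *		if (4 + config_len + hs_config_len + USB_DT_DEVICE_SIZE > sizeof buf)
 *			return -1;
 *		memcpy(buf, &tag, 4);
 *		memcpy(buf + len, config, config_len);	// all wTotalLength bytes
 *		len += config_len;
 *		if (hs_config) {			// optional on full-speed-only UDCs
 *			memcpy(buf + len, hs_config, hs_config_len);
 *			len += hs_config_len;
 *		}
 *		memcpy(buf + len, device, USB_DT_DEVICE_SIZE);
 *		len += USB_DT_DEVICE_SIZE;
 *		return write(fd, buf, len) == (ssize_t) len ? 0 : -1;
 *	}
 */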
static int is_valid_config(struct usb_config_descriptor *config,
		unsigned int total)
{
	return config->bDescriptorType == USB_DT_CONFIG
		&& config->bLength == USB_DT_CONFIG_SIZE
		&& total >= USB_DT_CONFIG_SIZE
		&& config->bConfigurationValue != 0
		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
	/* FIXME check lengths: walk to end */
}

static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data		*dev = fd->private_data;
	ssize_t			value = len, length = len;
	unsigned		total;
	u32			tag;
	char			*kbuf;

	spin_lock_irq(&dev->lock);
	if (dev->state > STATE_DEV_OPENED) {
		value = ep0_write(fd, buf, len, ptr);
		spin_unlock_irq(&dev->lock);
		return value;
	}
	spin_unlock_irq(&dev->lock);

	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
	    (len > PAGE_SIZE * 4))
		return -EINVAL;

	/* we might need to change message format someday */
	if (copy_from_user (&tag, buf, 4))
		return -EFAULT;
	if (tag != 0)
		return -EINVAL;
	buf += 4;
	length -= 4;

	kbuf = memdup_user(buf, length);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	spin_lock_irq (&dev->lock);
	value = -EINVAL;
	if (dev->buf) {
		kfree(kbuf);
		goto fail;
	}
	dev->buf = kbuf;

	/* full or low speed config */
	dev->config = (void *) kbuf;
	total = le16_to_cpu(dev->config->wTotalLength);
	if (!is_valid_config(dev->config, total) ||
			total > length - USB_DT_DEVICE_SIZE)
		goto fail;
	kbuf += total;
	length -= total;

	/* optional high speed config */
	if (kbuf [1] == USB_DT_CONFIG) {
		dev->hs_config = (void *) kbuf;
		total = le16_to_cpu(dev->hs_config->wTotalLength);
		if (!is_valid_config(dev->hs_config, total) ||
				total > length - USB_DT_DEVICE_SIZE)
			goto fail;
		kbuf += total;
		length -= total;
	} else {
		dev->hs_config = NULL;
	}

	/* could support multiple configs, using another encoding! */

	/* device descriptor (tweaked for paranoia) */
	if (length != USB_DT_DEVICE_SIZE)
		goto fail;
	dev->dev = (void *)kbuf;
	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
			|| dev->dev->bDescriptorType != USB_DT_DEVICE
			|| dev->dev->bNumConfigurations != 1)
		goto fail;
	dev->dev->bcdUSB = cpu_to_le16 (0x0200);

	/* triggers gadgetfs_bind(); then we can enumerate. */
	spin_unlock_irq (&dev->lock);
	if (dev->hs_config)
		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
	else
		gadgetfs_driver.max_speed = USB_SPEED_FULL;

	value = usb_gadget_probe_driver(&gadgetfs_driver);
	if (value != 0) {
		kfree (dev->buf);
		dev->buf = NULL;
	} else {
		/* at this point "good" hardware has for the first time
		 * let the USB host see us.  alternatively, if users
		 * unplug/replug that will clear all the error state.
		 *
		 * note:  everything running before here was guaranteed
		 * to choke driver model style diagnostics.  from here
		 * on, they can work ... except in cleanup paths that
		 * kick in after the ep0 descriptor is closed.
		 */
		value = len;
		dev->gadget_registered = true;
	}
	return value;

fail:
	spin_unlock_irq (&dev->lock);
	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
	kfree (dev->buf);
	dev->buf = NULL;
	return value;
}

static int
dev_open (struct inode *inode, struct file *fd)
{
	struct dev_data		*dev = inode->i_private;
	int			value = -EBUSY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_DISABLED) {
		dev->ev_next = 0;
		dev->state = STATE_DEV_OPENED;
		fd->private_data = dev;
		get_dev (dev);
		value = 0;
	}
	spin_unlock_irq(&dev->lock);
	return value;
}

static const struct file_operations ep0_operations = {
	.llseek =	no_llseek,

	.open =		dev_open,
	.read =		ep0_read,
	.write =	dev_config,
	.fasync =	ep0_fasync,
	.poll =		ep0_poll,
	.unlocked_ioctl = dev_ioctl,
	.release =	dev_release,
};

/*----------------------------------------------------------------------*/

/* FILESYSTEM AND SUPERBLOCK OPERATIONS
 *
 * Mounting the filesystem creates a controller file, used first for
 * device configuration then later for event monitoring.
 */


/* FIXME PAM etc could set this security policy without mount options
 * if epfiles inherited ownership and permissions from ep0 ...
 */

static unsigned default_uid;
static unsigned default_gid;
static unsigned default_perm = S_IRUSR | S_IWUSR;

module_param (default_uid, uint, 0644);
module_param (default_gid, uint, 0644);
module_param (default_perm, uint, 0644);


static struct inode *
gadgetfs_make_inode (struct super_block *sb,
		void *data, const struct file_operations *fops,
		int mode)
{
	struct inode *inode = new_inode (sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = make_kuid(&init_user_ns, default_uid);
		inode->i_gid = make_kgid(&init_user_ns, default_gid);
		inode->i_atime = inode->i_mtime = inode->i_ctime
				= current_time(inode);
		inode->i_private = data;
		inode->i_fop = fops;
	}
	return inode;
}

/* creates in fs root directory, so non-renamable and non-linkable.
 * so inode and dentry are paired, until device reconfig.
 */
static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
		void *data, const struct file_operations *fops)
{
	struct dentry	*dentry;
	struct inode	*inode;

	dentry = d_alloc_name(sb->s_root, name);
	if (!dentry)
		return NULL;

	inode = gadgetfs_make_inode (sb, data, fops,
			S_IFREG | (default_perm & S_IRWXUGO));
	if (!inode) {
		dput(dentry);
		return NULL;
	}
	d_add (dentry, inode);
	return dentry;
}

static const struct super_operations gadget_fs_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,
};

static int
gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
{
	struct inode	*inode;
	struct dev_data	*dev;

	if (the_device)
		return -ESRCH;

	CHIP = usb_get_gadget_udc_name();
	if (!CHIP)
		return -ENODEV;

	/* superblock */
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = GADGETFS_MAGIC;
	sb->s_op = &gadget_fs_operations;
	sb->s_time_gran = 1;

	/* root inode */
	inode = gadgetfs_make_inode (sb,
			NULL, &simple_dir_operations,
			S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto Enomem;
	inode->i_op = &simple_dir_inode_operations;
	if (!(sb->s_root = d_make_root (inode)))
		goto Enomem;

	/* the ep0 file is named after the controller we expect;
	 * user mode code can use it for sanity checks, like we do.
	 */
	dev = dev_new ();
	if (!dev)
		goto Enomem;

	dev->sb = sb;
	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
	if (!dev->dentry) {
		put_dev(dev);
		goto Enomem;
	}

	/* other endpoint files are available after hardware setup,
	 * from binding to a controller.
	 */
	the_device = dev;
	return 0;

Enomem:
	return -ENOMEM;
}

/* "mount -t gadgetfs path /dev/gadget" ends up here */
static struct dentry *
gadgetfs_mount (struct file_system_type *t, int flags,
		const char *path, void *opts)
{
	return mount_single (t, flags, opts, gadgetfs_fill_super);
}

static void
gadgetfs_kill_sb (struct super_block *sb)
{
	kill_litter_super (sb);
	if (the_device) {
		put_dev (the_device);
		the_device = NULL;
	}
	kfree(CHIP);
	CHIP = NULL;
}

/*----------------------------------------------------------------------*/

static struct file_system_type gadgetfs_type = {
	.owner		= THIS_MODULE,
	.name		= shortname,
	.mount		= gadgetfs_mount,
	.kill_sb	= gadgetfs_kill_sb,
};
MODULE_ALIAS_FS("gadgetfs");

/*----------------------------------------------------------------------*/

static int __init init (void)
{
	int status;

	status = register_filesystem (&gadgetfs_type);
	if (status == 0)
		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
			shortname, driver_desc);
	return status;
}
module_init (init);

static void __exit cleanup (void)
{
	pr_debug ("unregister %s\n", shortname);
	unregister_filesystem (&gadgetfs_type);
}
module_exit (cleanup);