// SPDX-License-Identifier: GPL-2.0+
/*
 * inode.c -- user mode filesystem api for usb gadget controllers
 *
 * Copyright (C) 2003-2004 David Brownell
 * Copyright (C) 2003 Agilent Technologies
 */


/* #define VERBOSE_DEBUG */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/uts.h>
#include <linux/wait.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/mmu_context.h>
#include <linux/aio.h>
#include <linux/uio.h>
#include <linux/refcount.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/moduleparam.h>

#include <linux/usb/gadgetfs.h>
#include <linux/usb/gadget.h>


/*
 * The gadgetfs API maps each endpoint to a file descriptor so that you
 * can use standard synchronous read/write calls for I/O.  There's some
 * O_NONBLOCK and O_ASYNC/FASYNC style i/o support.  Example usermode
 * drivers show how this works in practice.  You can also use AIO to
 * eliminate I/O gaps between requests, to help when streaming data.
 *
 * Key parts that must be USB-specific are protocols defining how the
 * read/write operations relate to the hardware state machines.  There
 * are two types of files.  One type is for the device, implementing ep0.
 * The other type is for each IN or OUT endpoint.  In both cases, the
 * user mode driver must configure the hardware before using it.
 *
 * - First, dev_config() is called when /dev/gadget/$CHIP is configured
 *   (by writing configuration and device descriptors).  Afterwards it
 *   may serve as a source of device events, used to handle all control
 *   requests other than basic enumeration.
 *
 * - Then, after a SET_CONFIGURATION control request, ep_config() is
 *   called when each /dev/gadget/ep* file is configured (by writing
 *   endpoint descriptors).  Afterwards these files are used to write()
 *   IN data or to read() OUT data.  To halt the endpoint, a "wrong
 *   direction" request is issued (like reading an IN endpoint).
 *
 * Unlike "usbfs" the only ioctl()s are for things that are rare, and maybe
 * not possible on all hardware.  For example, precise fault handling with
 * respect to data left in endpoint fifos after aborted operations; or
 * selective clearing of endpoint halts, to implement SET_INTERFACE.
 */
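/* As a rough illustration of the flow described above, a user mode
 * driver might follow this outline (a minimal sketch; the controller
 * name "net2280" and the endpoint name "ep1in" are only examples and
 * depend on the UDC that is actually present):
 *
 *	int ep0 = open("/dev/gadget/net2280", O_RDWR);
 *	write(ep0, descriptors, sizeof descriptors);	// device config
 *	read(ep0, &event, sizeof event);		// then wait for events
 *
 *	int ep1 = open("/dev/gadget/ep1in", O_RDWR);
 *	write(ep1, ep_descriptors, sizeof ep_descriptors);	// ep config
 *	write(ep1, data, length);			// IN transfers
 *
 * The exact message formats are documented before dev_config() and
 * ep_config() below.
 */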

#define	DRIVER_DESC	"USB Gadget filesystem"
#define	DRIVER_VERSION	"24 Aug 2004"

static const char driver_desc [] = DRIVER_DESC;
static const char shortname [] = "gadgetfs";

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");

static int ep_open(struct inode *, struct file *);


/*----------------------------------------------------------------------*/

#define GADGETFS_MAGIC		0xaee71ee7

/* /dev/gadget/$CHIP represents ep0 and the whole device */
enum ep0_state {
	/* DISABLED is the initial state. */
	STATE_DEV_DISABLED = 0,

	/* Only one open() of /dev/gadget/$CHIP; only one file tracks
	 * ep0/device i/o modes and binding to the controller.  Driver
	 * must always write descriptors to initialize the device, then
	 * the device becomes UNCONNECTED until enumeration.
	 */
	STATE_DEV_OPENED,

	/* From then on, ep0 fd is in either of two basic modes:
	 * - (UN)CONNECTED: read usb_gadgetfs_event(s) from it
	 * - SETUP: read/write will transfer control data and succeed;
	 *   or if "wrong direction", performs protocol stall
	 */
	STATE_DEV_UNCONNECTED,
	STATE_DEV_CONNECTED,
	STATE_DEV_SETUP,

	/* UNBOUND means the driver closed ep0, so the device won't be
	 * accessible again (DEV_DISABLED) until all fds are closed.
	 */
	STATE_DEV_UNBOUND,
};

/* enough for the whole queue: most events invalidate others */
#define	N_EVENT			5

struct dev_data {
	spinlock_t			lock;
	refcount_t			count;
	int				udc_usage;
	enum ep0_state			state;		/* P: lock */
	struct usb_gadgetfs_event	event [N_EVENT];
	unsigned			ev_next;
	struct fasync_struct		*fasync;
	u8				current_config;

	/* drivers reading ep0 MUST handle control requests (SETUP)
	 * reported that way; else the host will time out.
	 */
	unsigned			usermode_setup : 1,
					setup_in : 1,
					setup_can_stall : 1,
					setup_out_ready : 1,
					setup_out_error : 1,
					setup_abort : 1,
					gadget_registered : 1;
	unsigned			setup_wLength;

	/* the rest is basically write-once */
	struct usb_config_descriptor	*config, *hs_config;
	struct usb_device_descriptor	*dev;
	struct usb_request		*req;
	struct usb_gadget		*gadget;
	struct list_head		epfiles;
	void				*buf;
	wait_queue_head_t		wait;
	struct super_block		*sb;
	struct dentry			*dentry;

	/* except this scratch i/o buffer for ep0 */
	u8				rbuf [256];
};

static inline void get_dev (struct dev_data *data)
{
	refcount_inc (&data->count);
}

static void put_dev (struct dev_data *data)
{
	if (likely (!refcount_dec_and_test (&data->count)))
		return;
	/* needs no more cleanup */
	BUG_ON (waitqueue_active (&data->wait));
	kfree (data);
}

static struct dev_data *dev_new (void)
{
	struct dev_data		*dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return NULL;
	dev->state = STATE_DEV_DISABLED;
	refcount_set (&dev->count, 1);
	spin_lock_init (&dev->lock);
	INIT_LIST_HEAD (&dev->epfiles);
	init_waitqueue_head (&dev->wait);
	return dev;
}

/*----------------------------------------------------------------------*/

/* other /dev/gadget/$ENDPOINT files represent endpoints */
enum ep_state {
	STATE_EP_DISABLED = 0,
	STATE_EP_READY,
	STATE_EP_ENABLED,
	STATE_EP_UNBOUND,
};

struct ep_data {
	struct mutex			lock;
	enum ep_state			state;
	refcount_t			count;
	struct dev_data			*dev;
	/* must hold dev->lock before accessing ep or req */
	struct usb_ep			*ep;
	struct usb_request		*req;
	ssize_t				status;
	char				name [16];
	struct usb_endpoint_descriptor	desc, hs_desc;
	struct list_head		epfiles;
	wait_queue_head_t		wait;
	struct dentry			*dentry;
};

static inline void get_ep (struct ep_data *data)
{
	refcount_inc (&data->count);
}

static void put_ep (struct ep_data *data)
{
	if (likely (!refcount_dec_and_test (&data->count)))
		return;
	put_dev (data->dev);
	/* needs no more cleanup */
	BUG_ON (!list_empty (&data->epfiles));
	BUG_ON (waitqueue_active (&data->wait));
	kfree (data);
}

/*----------------------------------------------------------------------*/

/* most "how to use the hardware" policy choices are in userspace:
 * mapping endpoint roles (which the driver needs) to the capabilities
 * which the usb controller has.  most of those capabilities are exposed
 * implicitly, starting with the driver name and then endpoint names.
 */

static const char *CHIP;

/*----------------------------------------------------------------------*/

/* NOTE: don't use dev_printk calls before binding to the gadget
 * at the end of ep0 configuration, or after unbind.
 */

/* too wordy: dev_printk(level , &(d)->gadget->dev , fmt , ## args) */
#define xprintk(d,level,fmt,args...) \
	printk(level "%s: " fmt , shortname , ## args)

#ifdef DEBUG
#define DBG(dev,fmt,args...) \
	xprintk(dev , KERN_DEBUG , fmt , ## args)
#else
#define DBG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#ifdef VERBOSE_DEBUG
#define VDEBUG	DBG
#else
#define VDEBUG(dev,fmt,args...) \
	do { } while (0)
#endif /* DEBUG */

#define ERROR(dev,fmt,args...) \
	xprintk(dev , KERN_ERR , fmt , ## args)
#define INFO(dev,fmt,args...) \
	xprintk(dev , KERN_INFO , fmt , ## args)


/*----------------------------------------------------------------------*/

/* SYNCHRONOUS ENDPOINT OPERATIONS (bulk/intr/iso)
 *
 * After opening, configure non-control endpoints.  Then use normal
 * stream read() and write() requests; and maybe ioctl() to get more
 * precise FIFO status when recovering from cancellation.
 */
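/* For example, once an endpoint file has been configured, a user mode
 * driver could stream IN data with plain write() calls and use the
 * ioctls declared in <linux/usb/gadgetfs.h> when recovering (a hedged
 * sketch; error handling is abbreviated and "fd" is assumed to be an
 * already-configured IN endpoint file):
 *
 *	ssize_t n = write(fd, buf, len);	// blocks until the host collects it
 *	if (n < 0 && errno == EINTR)
 *		;				// i/o was canceled
 *	int pending = ioctl(fd, GADGETFS_FIFO_STATUS);	// bytes left in the fifo,
 *						// if the controller reports that
 *	ioctl(fd, GADGETFS_CLEAR_HALT);		// e.g. while handling SET_INTERFACE
 */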

static void epio_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct ep_data		*epdata = ep->driver_data;

	if (!req->context)
		return;
	if (req->status)
		epdata->status = req->status;
	else
		epdata->status = req->actual;
	complete ((struct completion *)req->context);
}

/* tasklock endpoint, returning when it's connected.
 * still need dev->lock to use epdata->ep.
 */
static int
get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write)
{
	int	val;

	if (f_flags & O_NONBLOCK) {
		if (!mutex_trylock(&epdata->lock))
			goto nonblock;
		if (epdata->state != STATE_EP_ENABLED &&
		    (!is_write || epdata->state != STATE_EP_READY)) {
			mutex_unlock(&epdata->lock);
nonblock:
			val = -EAGAIN;
		} else
			val = 0;
		return val;
	}

	val = mutex_lock_interruptible(&epdata->lock);
	if (val < 0)
		return val;

	switch (epdata->state) {
	case STATE_EP_ENABLED:
		return 0;
	case STATE_EP_READY:			/* not configured yet */
		if (is_write)
			return 0;
		// FALLTHRU
	case STATE_EP_UNBOUND:			/* clean disconnect */
		break;
	// case STATE_EP_DISABLED:		/* "can't happen" */
	default:				/* error! */
		pr_debug ("%s: ep %p not available, state %d\n",
				shortname, epdata, epdata->state);
	}
	mutex_unlock(&epdata->lock);
	return -ENODEV;
}

static ssize_t
ep_io (struct ep_data *epdata, void *buf, unsigned len)
{
	DECLARE_COMPLETION_ONSTACK (done);
	int value;

	spin_lock_irq (&epdata->dev->lock);
	if (likely (epdata->ep != NULL)) {
		struct usb_request	*req = epdata->req;

		req->context = &done;
		req->complete = epio_complete;
		req->buf = buf;
		req->length = len;
		value = usb_ep_queue (epdata->ep, req, GFP_ATOMIC);
	} else
		value = -ENODEV;
	spin_unlock_irq (&epdata->dev->lock);

	if (likely (value == 0)) {
		value = wait_event_interruptible (done.wait, done.done);
		if (value != 0) {
			spin_lock_irq (&epdata->dev->lock);
			if (likely (epdata->ep != NULL)) {
				DBG (epdata->dev, "%s i/o interrupted\n",
						epdata->name);
				usb_ep_dequeue (epdata->ep, epdata->req);
				spin_unlock_irq (&epdata->dev->lock);

				wait_event (done.wait, done.done);
				if (epdata->status == -ECONNRESET)
					epdata->status = -EINTR;
			} else {
				spin_unlock_irq (&epdata->dev->lock);

				DBG (epdata->dev, "endpoint gone\n");
				epdata->status = -ENODEV;
			}
		}
		return epdata->status;
	}
	return value;
}

static int
ep_release (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = fd->private_data;
	int value;

	value = mutex_lock_interruptible(&data->lock);
	if (value < 0)
		return value;

	/* clean up if this can be reopened */
	if (data->state != STATE_EP_UNBOUND) {
		data->state = STATE_EP_DISABLED;
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
		usb_ep_disable(data->ep);
	}
	mutex_unlock(&data->lock);
	put_ep (data);
	return 0;
}

static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
{
	struct ep_data		*data = fd->private_data;
	int			status;

	if ((status = get_ready_ep (fd->f_flags, data, false)) < 0)
		return status;

	spin_lock_irq (&data->dev->lock);
	if (likely (data->ep != NULL)) {
		switch (code) {
		case GADGETFS_FIFO_STATUS:
			status = usb_ep_fifo_status (data->ep);
			break;
		case GADGETFS_FIFO_FLUSH:
			usb_ep_fifo_flush (data->ep);
			break;
		case GADGETFS_CLEAR_HALT:
			status = usb_ep_clear_halt (data->ep);
			break;
		default:
			status = -ENOTTY;
		}
	} else
		status = -ENODEV;
	spin_unlock_irq (&data->dev->lock);
	mutex_unlock(&data->lock);
	return status;
}

/*----------------------------------------------------------------------*/

/* ASYNCHRONOUS ENDPOINT I/O OPERATIONS (bulk/intr/iso) */
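/* User space can keep several transfers queued at once by using the
 * kernel AIO interface on these endpoint files.  A hedged sketch using
 * the libaio wrappers (io_queue_init/io_prep_pread/io_submit/io_getevents
 * come from libaio, not from this driver; "fd" is assumed to be an
 * already-configured OUT endpoint file):
 *
 *	io_context_t ctx = 0;
 *	struct iocb iocb, *iocbs[] = { &iocb };
 *	struct io_event ev;
 *
 *	io_queue_init(8, &ctx);
 *	io_prep_pread(&iocb, fd, buf, sizeof buf, 0);
 *	io_submit(ctx, 1, iocbs);		// queue the OUT read
 *	io_getevents(ctx, 1, 1, &ev, NULL);	// wait for completion
 *
 * Keeping a few such requests outstanding helps avoid the I/O gaps
 * between packets mentioned at the top of this file.
 */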

struct kiocb_priv {
	struct usb_request	*req;
	struct ep_data		*epdata;
	struct kiocb		*iocb;
	struct mm_struct	*mm;
	struct work_struct	work;
	void			*buf;
	struct iov_iter		to;
	const void		*to_free;
	unsigned		actual;
};

static int ep_aio_cancel(struct kiocb *iocb)
{
	struct kiocb_priv	*priv = iocb->private;
	struct ep_data		*epdata;
	int			value;

	local_irq_disable();
	epdata = priv->epdata;
	// spin_lock(&epdata->dev->lock);
	if (likely(epdata && epdata->ep && priv->req))
		value = usb_ep_dequeue (epdata->ep, priv->req);
	else
		value = -EINVAL;
	// spin_unlock(&epdata->dev->lock);
	local_irq_enable();

	return value;
}

static void ep_user_copy_worker(struct work_struct *work)
{
	struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work);
	struct mm_struct *mm = priv->mm;
	struct kiocb *iocb = priv->iocb;
	size_t ret;

	use_mm(mm);
	ret = copy_to_iter(priv->buf, priv->actual, &priv->to);
	unuse_mm(mm);
	if (!ret)
		ret = -EFAULT;

	/* completing the iocb can drop the ctx and mm, don't touch mm after */
	iocb->ki_complete(iocb, ret, ret);

	kfree(priv->buf);
	kfree(priv->to_free);
	kfree(priv);
}

static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct kiocb		*iocb = req->context;
	struct kiocb_priv	*priv = iocb->private;
	struct ep_data		*epdata = priv->epdata;

	/* lock against disconnect (and ideally, cancel) */
	spin_lock(&epdata->dev->lock);
	priv->req = NULL;
	priv->epdata = NULL;

	/* if this was a write or a read returning no data then we
	 * don't need to copy anything to userspace, so we can
	 * complete the aio request immediately.
	 */
	if (priv->to_free == NULL || unlikely(req->actual == 0)) {
		kfree(req->buf);
		kfree(priv->to_free);
		kfree(priv);
		iocb->private = NULL;
		/* aio_complete() reports bytes-transferred _and_ faults */

		iocb->ki_complete(iocb, req->actual ? req->actual : req->status,
				req->status);
	} else {
		/* ep_copy_to_user() won't report both; we hide some faults */
		if (unlikely(0 != req->status))
			DBG(epdata->dev, "%s fault %d len %d\n",
				ep->name, req->status, req->actual);

		priv->buf = req->buf;
		priv->actual = req->actual;
		INIT_WORK(&priv->work, ep_user_copy_worker);
		schedule_work(&priv->work);
	}

	usb_ep_free_request(ep, req);
	spin_unlock(&epdata->dev->lock);
	put_ep(epdata);
}

static ssize_t ep_aio(struct kiocb *iocb,
		      struct kiocb_priv *priv,
		      struct ep_data *epdata,
		      char *buf,
		      size_t len)
{
	struct usb_request *req;
	ssize_t value;

	iocb->private = priv;
	priv->iocb = iocb;

	kiocb_set_cancel_fn(iocb, ep_aio_cancel);
	get_ep(epdata);
	priv->epdata = epdata;
	priv->actual = 0;
	priv->mm = current->mm; /* mm teardown waits for iocbs in exit_aio() */

	/* each kiocb is coupled to one usb_request, but we can't
	 * allocate or submit those if the host disconnected.
	 */
	spin_lock_irq(&epdata->dev->lock);
	value = -ENODEV;
	if (unlikely(epdata->ep == NULL))
		goto fail;

	req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC);
	value = -ENOMEM;
	if (unlikely(!req))
		goto fail;

	priv->req = req;
	req->buf = buf;
	req->length = len;
	req->complete = ep_aio_complete;
	req->context = iocb;
	value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC);
	if (unlikely(0 != value)) {
		usb_ep_free_request(epdata->ep, req);
		goto fail;
	}
	spin_unlock_irq(&epdata->dev->lock);
	return -EIOCBQUEUED;

fail:
	spin_unlock_irq(&epdata->dev->lock);
	kfree(priv->to_free);
	kfree(priv);
	put_ep(epdata);
	return value;
}

static ssize_t
ep_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(to);
	ssize_t value;
	char *buf;

	if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0)
		return value;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}
	if (is_sync_kiocb(iocb)) {
		value = ep_io(epdata, buf, len);
		if (value >= 0 && (copy_to_iter(buf, value, to) != value))
			value = -EFAULT;
	} else {
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (!priv)
			goto fail;
		priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL);
		if (!priv->to_free) {
			kfree(priv);
			goto fail;
		}
		value = ep_aio(iocb, priv, epdata, buf, len);
		if (value == -EIOCBQUEUED)
			buf = NULL;
	}
fail:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}

static ssize_t ep_config(struct ep_data *, const char *, size_t);

static ssize_t
ep_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ep_data *epdata = file->private_data;
	size_t len = iov_iter_count(from);
	bool configured;
	ssize_t value;
	char *buf;

	if ((value = get_ready_ep(file->f_flags, epdata, true)) < 0)
		return value;

	configured = epdata->state == STATE_EP_ENABLED;

	/* halt any endpoint by doing a "wrong direction" i/o call */
	if (configured && !usb_endpoint_dir_in(&epdata->desc)) {
		if (usb_endpoint_xfer_isoc(&epdata->desc) ||
		    !is_sync_kiocb(iocb)) {
			mutex_unlock(&epdata->lock);
			return -EINVAL;
		}
		DBG (epdata->dev, "%s halt\n", epdata->name);
		spin_lock_irq(&epdata->dev->lock);
		if (likely(epdata->ep != NULL))
			usb_ep_set_halt(epdata->ep);
		spin_unlock_irq(&epdata->dev->lock);
		mutex_unlock(&epdata->lock);
		return -EBADMSG;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (unlikely(!buf)) {
		mutex_unlock(&epdata->lock);
		return -ENOMEM;
	}

	if (unlikely(!copy_from_iter_full(buf, len, from))) {
		value = -EFAULT;
		goto out;
	}

	if (unlikely(!configured)) {
		value = ep_config(epdata, buf, len);
	} else if (is_sync_kiocb(iocb)) {
		value = ep_io(epdata, buf, len);
	} else {
		struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL);
		value = -ENOMEM;
		if (priv) {
			value = ep_aio(iocb, priv, epdata, buf, len);
			if (value == -EIOCBQUEUED)
				buf = NULL;
		}
	}
out:
	kfree(buf);
	mutex_unlock(&epdata->lock);
	return value;
}

/*----------------------------------------------------------------------*/

/* used after endpoint configuration */
static const struct file_operations ep_io_operations = {
	.owner =	THIS_MODULE,

	.open =		ep_open,
	.release =	ep_release,
	.llseek =	no_llseek,
	.unlocked_ioctl = ep_ioctl,
	.read_iter =	ep_read_iter,
	.write_iter =	ep_write_iter,
};

/* ENDPOINT INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$ENDPOINT", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the endpoint configuration, configuring
 * the controller to process bulk, interrupt, or isochronous transfers
 * at the right maxpacket size, and so on.
 *
 * The descriptors are message type 1, identified by a host order u32
 * at the beginning of what's written.  Descriptor order is: full/low
 * speed descriptor, then optional high speed descriptor.
 */
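/* Concretely, the write() buffer for an endpoint file could be built
 * like this (a hedged sketch; the descriptor contents themselves are
 * the user mode driver's own choice, and the high speed descriptor is
 * omitted on full speed only hardware):
 *
 *	struct usb_endpoint_descriptor fs, hs;	// filled in by the driver
 *	__u32 tag = 1;				// host byte order
 *	char buf[4 + 2 * USB_DT_ENDPOINT_SIZE];
 *
 *	memcpy(buf, &tag, 4);
 *	memcpy(buf + 4, &fs, USB_DT_ENDPOINT_SIZE);
 *	memcpy(buf + 4 + USB_DT_ENDPOINT_SIZE, &hs, USB_DT_ENDPOINT_SIZE);
 *	write(fd, buf, sizeof buf);
 *
 * ep_config() below parses exactly this layout before enabling the
 * endpoint.
 */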
static ssize_t
ep_config (struct ep_data *data, const char *buf, size_t len)
{
	struct usb_ep		*ep;
	u32			tag;
	int			value, length = len;

	if (data->state != STATE_EP_READY) {
		value = -EL2HLT;
		goto fail;
	}

	value = len;
	if (len < USB_DT_ENDPOINT_SIZE + 4)
		goto fail0;

	/* we might need to change message format someday */
	memcpy(&tag, buf, 4);
	if (tag != 1) {
		DBG(data->dev, "config %s, bad tag %d\n", data->name, tag);
		goto fail0;
	}
	buf += 4;
	len -= 4;

	/* NOTE:  audio endpoint extensions not accepted here;
	 * just don't include the extra bytes.
	 */

	/* full/low speed descriptor, then high speed */
	memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE);
	if (data->desc.bLength != USB_DT_ENDPOINT_SIZE
			|| data->desc.bDescriptorType != USB_DT_ENDPOINT)
		goto fail0;
	if (len != USB_DT_ENDPOINT_SIZE) {
		if (len != 2 * USB_DT_ENDPOINT_SIZE)
			goto fail0;
		memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE,
			USB_DT_ENDPOINT_SIZE);
		if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE
				|| data->hs_desc.bDescriptorType
					!= USB_DT_ENDPOINT) {
			DBG(data->dev, "config %s, bad hs length or type\n",
					data->name);
			goto fail0;
		}
	}

	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND) {
		value = -ENOENT;
		goto gone;
	} else {
		ep = data->ep;
		if (ep == NULL) {
			value = -ENODEV;
			goto gone;
		}
	}
	switch (data->dev->gadget->speed) {
	case USB_SPEED_LOW:
	case USB_SPEED_FULL:
		ep->desc = &data->desc;
		break;
	case USB_SPEED_HIGH:
		/* fails if caller didn't provide that descriptor... */
		ep->desc = &data->hs_desc;
		break;
	default:
		DBG(data->dev, "unconnected, %s init abandoned\n",
				data->name);
		value = -EINVAL;
		goto gone;
	}
	value = usb_ep_enable(ep);
	if (value == 0) {
		data->state = STATE_EP_ENABLED;
		value = length;
	}
gone:
	spin_unlock_irq (&data->dev->lock);
	if (value < 0) {
fail:
		data->desc.bDescriptorType = 0;
		data->hs_desc.bDescriptorType = 0;
	}
	return value;
fail0:
	value = -EINVAL;
	goto fail;
}

static int
ep_open (struct inode *inode, struct file *fd)
{
	struct ep_data		*data = inode->i_private;
	int			value = -EBUSY;

	if (mutex_lock_interruptible(&data->lock) != 0)
		return -EINTR;
	spin_lock_irq (&data->dev->lock);
	if (data->dev->state == STATE_DEV_UNBOUND)
		value = -ENOENT;
	else if (data->state == STATE_EP_DISABLED) {
		value = 0;
		data->state = STATE_EP_READY;
		get_ep (data);
		fd->private_data = data;
		VDEBUG (data->dev, "%s ready\n", data->name);
	} else
		DBG (data->dev, "%s state %d\n",
			data->name, data->state);
	spin_unlock_irq (&data->dev->lock);
	mutex_unlock(&data->lock);
	return value;
}

/*----------------------------------------------------------------------*/

/* EP0 IMPLEMENTATION can be partly in userspace.
 *
 * Drivers that use this facility receive various events, including
 * control requests the kernel doesn't handle.  Drivers that don't
 * use this facility may be too simple-minded for real applications.
 */
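/* A user mode driver's ep0 event loop might look roughly like this
 * (a hedged sketch; real drivers also handle O_NONBLOCK, poll(), and
 * the data/status stages described below; handle_setup() is a
 * driver-supplied routine, not part of this API):
 *
 *	struct usb_gadgetfs_event event;
 *
 *	while (read(ep0, &event, sizeof event) == sizeof event) {
 *		switch (event.type) {
 *		case GADGETFS_CONNECT:		// event.u.speed is valid
 *		case GADGETFS_DISCONNECT:
 *		case GADGETFS_SUSPEND:
 *			break;
 *		case GADGETFS_SETUP:		// event.u.setup is valid
 *			handle_setup(ep0, &event.u.setup);
 *			break;
 *		}
 *	}
 *
 * Reading several events in one call is also allowed, as ep0_read()
 * below shows.
 */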

static inline void ep0_readable (struct dev_data *dev)
{
	wake_up (&dev->wait);
	kill_fasync (&dev->fasync, SIGIO, POLL_IN);
}

static void clean_req (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;

	if (req->buf != dev->rbuf) {
		kfree(req->buf);
		req->buf = dev->rbuf;
	}
	req->complete = epio_complete;
	dev->setup_out_ready = 0;
}

static void ep0_complete (struct usb_ep *ep, struct usb_request *req)
{
	struct dev_data		*dev = ep->driver_data;
	unsigned long		flags;
	int			free = 1;

	/* for control OUT, data must still get to userspace */
	spin_lock_irqsave(&dev->lock, flags);
	if (!dev->setup_in) {
		dev->setup_out_error = (req->status != 0);
		if (!dev->setup_out_error)
			free = 0;
		dev->setup_out_ready = 1;
		ep0_readable (dev);
	}

	/* clean up as appropriate */
	if (free && req->buf != &dev->rbuf)
		clean_req (ep, req);
	req->complete = epio_complete;
	spin_unlock_irqrestore(&dev->lock, flags);
}

static int setup_req (struct usb_ep *ep, struct usb_request *req, u16 len)
{
	struct dev_data	*dev = ep->driver_data;

	if (dev->setup_out_ready) {
		DBG (dev, "ep0 request busy!\n");
		return -EBUSY;
	}
	if (len > sizeof (dev->rbuf))
		req->buf = kmalloc(len, GFP_ATOMIC);
	if (req->buf == NULL) {
		req->buf = dev->rbuf;
		return -ENOMEM;
	}
	req->complete = ep0_complete;
	req->length = len;
	req->zero = 0;
	return 0;
}

static ssize_t
ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data			*dev = fd->private_data;
	ssize_t				retval;
	enum ep0_state			state;

	spin_lock_irq (&dev->lock);
	if (dev->state <= STATE_DEV_OPENED) {
		retval = -EINVAL;
		goto done;
	}

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;
		goto done;
	}

	/* control DATA stage */
	if ((state = dev->state) == STATE_DEV_SETUP) {

		if (dev->setup_in) {		/* stall IN */
			VDEBUG(dev, "ep0in stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;

		} else if (len == 0) {		/* ack SET_CONFIGURATION etc */
			struct usb_ep		*ep = dev->gadget->ep0;
			struct usb_request	*req = dev->req;

			if ((retval = setup_req (ep, req, 0)) == 0) {
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				retval = usb_ep_queue (ep, req, GFP_KERNEL);
				spin_lock_irq (&dev->lock);
				--dev->udc_usage;
			}
			dev->state = STATE_DEV_CONNECTED;

			/* assume that was SET_CONFIGURATION */
			if (dev->current_config) {
				unsigned power;

				if (gadget_is_dualspeed(dev->gadget)
						&& (dev->gadget->speed
							== USB_SPEED_HIGH))
					power = dev->hs_config->bMaxPower;
				else
					power = dev->config->bMaxPower;
				usb_gadget_vbus_draw(dev->gadget, 2 * power);
			}

		} else {			/* collect OUT data */
			if ((fd->f_flags & O_NONBLOCK) != 0
					&& !dev->setup_out_ready) {
				retval = -EAGAIN;
				goto done;
			}
			spin_unlock_irq (&dev->lock);
			retval = wait_event_interruptible (dev->wait,
					dev->setup_out_ready != 0);

			/* FIXME state could change from under us */
			spin_lock_irq (&dev->lock);
			if (retval)
				goto done;

			if (dev->state != STATE_DEV_SETUP) {
				retval = -ECANCELED;
				goto done;
			}
			dev->state = STATE_DEV_CONNECTED;

			if (dev->setup_out_error)
				retval = -EIO;
			else {
				len = min (len, (size_t)dev->req->actual);
				++dev->udc_usage;
				spin_unlock_irq(&dev->lock);
				if (copy_to_user (buf, dev->req->buf, len))
					retval = -EFAULT;
				else
					retval = len;
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				clean_req (dev->gadget->ep0, dev->req);
				/* NOTE userspace can't yet choose to stall */
			}
		}
		goto done;
	}

	/* else normal: return event data */
	if (len < sizeof dev->event [0]) {
		retval = -EINVAL;
		goto done;
	}
	len -= len % sizeof (struct usb_gadgetfs_event);
	dev->usermode_setup = 1;

scan:
	/* return queued events right away */
	if (dev->ev_next != 0) {
		unsigned		i, n;

		n = len / sizeof (struct usb_gadgetfs_event);
		if (dev->ev_next < n)
			n = dev->ev_next;

		/* ep0 i/o has special semantics during STATE_DEV_SETUP */
		for (i = 0; i < n; i++) {
			if (dev->event [i].type == GADGETFS_SETUP) {
				dev->state = STATE_DEV_SETUP;
				n = i + 1;
				break;
			}
		}
		spin_unlock_irq (&dev->lock);
		len = n * sizeof (struct usb_gadgetfs_event);
		if (copy_to_user (buf, &dev->event, len))
			retval = -EFAULT;
		else
			retval = len;
		if (len > 0) {
			/* NOTE this doesn't guard against broken drivers;
			 * concurrent ep0 readers may lose events.
			 */
			spin_lock_irq (&dev->lock);
			if (dev->ev_next > n) {
				memmove(&dev->event[0], &dev->event[n],
					sizeof (struct usb_gadgetfs_event)
						* (dev->ev_next - n));
			}
			dev->ev_next -= n;
			spin_unlock_irq (&dev->lock);
		}
		return retval;
	}
	if (fd->f_flags & O_NONBLOCK) {
		retval = -EAGAIN;
		goto done;
	}

	switch (state) {
	default:
		DBG (dev, "fail %s, state %d\n", __func__, state);
		retval = -ESRCH;
		break;
	case STATE_DEV_UNCONNECTED:
	case STATE_DEV_CONNECTED:
		spin_unlock_irq (&dev->lock);
		DBG (dev, "%s wait\n", __func__);

		/* wait for events */
		retval = wait_event_interruptible (dev->wait,
				dev->ev_next != 0);
		if (retval < 0)
			return retval;
		spin_lock_irq (&dev->lock);
		goto scan;
	}

done:
	spin_unlock_irq (&dev->lock);
	return retval;
}

static struct usb_gadgetfs_event *
next_event (struct dev_data *dev, enum usb_gadgetfs_event_type type)
{
	struct usb_gadgetfs_event	*event;
	unsigned			i;

	switch (type) {
	/* these events purge the queue */
	case GADGETFS_DISCONNECT:
		if (dev->state == STATE_DEV_SETUP)
			dev->setup_abort = 1;
		// FALL THROUGH
	case GADGETFS_CONNECT:
		dev->ev_next = 0;
		break;
	case GADGETFS_SETUP:		/* previous request timed out */
	case GADGETFS_SUSPEND:		/* same effect */
		/* these events can't be repeated */
		for (i = 0; i != dev->ev_next; i++) {
			if (dev->event [i].type != type)
				continue;
			DBG(dev, "discard old event[%d] %d\n", i, type);
			dev->ev_next--;
			if (i == dev->ev_next)
				break;
			/* indices start at zero, for simplicity */
			memmove (&dev->event [i], &dev->event [i + 1],
				sizeof (struct usb_gadgetfs_event)
					* (dev->ev_next - i));
		}
		break;
	default:
		BUG ();
	}
	VDEBUG(dev, "event[%d] = %d\n", dev->ev_next, type);
	event = &dev->event [dev->ev_next++];
	BUG_ON (dev->ev_next > N_EVENT);
	memset (event, 0, sizeof *event);
	event->type = type;
	return event;
}

static ssize_t
ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data		*dev = fd->private_data;
	ssize_t			retval = -ESRCH;

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		retval = -EIDRM;

	/* data and/or status stage for control request */
	} else if (dev->state == STATE_DEV_SETUP) {

		len = min_t(size_t, len, dev->setup_wLength);
		if (dev->setup_in) {
			retval = setup_req (dev->gadget->ep0, dev->req, len);
			if (retval == 0) {
				dev->state = STATE_DEV_CONNECTED;
				++dev->udc_usage;
				spin_unlock_irq (&dev->lock);
				if (copy_from_user (dev->req->buf, buf, len))
					retval = -EFAULT;
				else {
					if (len < dev->setup_wLength)
						dev->req->zero = 1;
					retval = usb_ep_queue (
						dev->gadget->ep0, dev->req,
						GFP_KERNEL);
				}
				spin_lock_irq(&dev->lock);
				--dev->udc_usage;
				if (retval < 0) {
					clean_req (dev->gadget->ep0, dev->req);
				} else
					retval = len;

				return retval;
			}

		/* can stall some OUT transfers */
		} else if (dev->setup_can_stall) {
			VDEBUG(dev, "ep0out stall\n");
			(void) usb_ep_set_halt (dev->gadget->ep0);
			retval = -EL2HLT;
			dev->state = STATE_DEV_CONNECTED;
		} else {
			DBG(dev, "bogus ep0out stall!\n");
		}
	} else
		DBG (dev, "fail %s, state %d\n", __func__, dev->state);

	return retval;
}

static int
ep0_fasync (int f, struct file *fd, int on)
{
	struct dev_data		*dev = fd->private_data;
	// caller must F_SETOWN before signal delivery happens
	VDEBUG (dev, "%s %s\n", __func__, on ? "on" : "off");
	return fasync_helper (f, fd, on, &dev->fasync);
}

static struct usb_gadget_driver gadgetfs_driver;

static int
dev_release (struct inode *inode, struct file *fd)
{
	struct dev_data		*dev = fd->private_data;

	/* closing ep0 === shutdown all */

	if (dev->gadget_registered) {
		usb_gadget_unregister_driver (&gadgetfs_driver);
		dev->gadget_registered = false;
	}

	/* at this point "good" hardware has disconnected the
	 * device from USB; the host won't see it any more.
	 * alternatively, all host requests will time out.
	 */

	kfree (dev->buf);
	dev->buf = NULL;

	/* other endpoints were all decoupled from this device */
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_DISABLED;
	spin_unlock_irq(&dev->lock);

	put_dev (dev);
	return 0;
}

static unsigned int
ep0_poll (struct file *fd, poll_table *wait)
{
	struct dev_data		*dev = fd->private_data;
	int			mask = 0;

	if (dev->state <= STATE_DEV_OPENED)
		return DEFAULT_POLLMASK;

	poll_wait(fd, &dev->wait, wait);

	spin_lock_irq (&dev->lock);

	/* report fd mode change before acting on it */
	if (dev->setup_abort) {
		dev->setup_abort = 0;
		mask = POLLHUP;
		goto out;
	}

	if (dev->state == STATE_DEV_SETUP) {
		if (dev->setup_in || dev->setup_can_stall)
			mask = POLLOUT;
	} else {
		if (dev->ev_next != 0)
			mask = POLLIN;
	}
out:
	spin_unlock_irq(&dev->lock);
	return mask;
}

static long dev_ioctl (struct file *fd, unsigned code, unsigned long value)
{
	struct dev_data		*dev = fd->private_data;
	struct usb_gadget	*gadget = dev->gadget;
	long ret = -ENOTTY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_OPENED ||
			dev->state == STATE_DEV_UNBOUND) {
		/* Not bound to a UDC */
	} else if (gadget->ops->ioctl) {
		++dev->udc_usage;
		spin_unlock_irq(&dev->lock);

		ret = gadget->ops->ioctl (gadget, code, value);

		spin_lock_irq(&dev->lock);
		--dev->udc_usage;
	}
	spin_unlock_irq(&dev->lock);

	return ret;
}

/*----------------------------------------------------------------------*/

/* The in-kernel gadget driver handles most ep0 issues, in particular
 * enumerating the single configuration (as provided from user space).
 *
 * Unrecognized ep0 requests may be handled in user space.
 */

static void make_qualifier (struct dev_data *dev)
{
	struct usb_qualifier_descriptor		qual;
	struct usb_device_descriptor		*desc;

	qual.bLength = sizeof qual;
	qual.bDescriptorType = USB_DT_DEVICE_QUALIFIER;
	qual.bcdUSB = cpu_to_le16 (0x0200);

	desc = dev->dev;
	qual.bDeviceClass = desc->bDeviceClass;
	qual.bDeviceSubClass = desc->bDeviceSubClass;
	qual.bDeviceProtocol = desc->bDeviceProtocol;

	/* assumes ep0 uses the same value for both speeds ... */
	qual.bMaxPacketSize0 = dev->gadget->ep0->maxpacket;

	qual.bNumConfigurations = 1;
	qual.bRESERVED = 0;

	memcpy (dev->rbuf, &qual, sizeof qual);
}

static int
config_buf (struct dev_data *dev, u8 type, unsigned index)
{
	int		len;
	int		hs = 0;

	/* only one configuration */
	if (index > 0)
		return -EINVAL;

	if (gadget_is_dualspeed(dev->gadget)) {
		hs = (dev->gadget->speed == USB_SPEED_HIGH);
		if (type == USB_DT_OTHER_SPEED_CONFIG)
			hs = !hs;
	}
	if (hs) {
		dev->req->buf = dev->hs_config;
		len = le16_to_cpu(dev->hs_config->wTotalLength);
	} else {
		dev->req->buf = dev->config;
		len = le16_to_cpu(dev->config->wTotalLength);
	}
	((u8 *)dev->req->buf) [1] = type;
	return len;
}

static int
gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
{
	struct dev_data			*dev = get_gadget_data (gadget);
	struct usb_request		*req = dev->req;
	int				value = -EOPNOTSUPP;
	struct usb_gadgetfs_event	*event;
	u16				w_value = le16_to_cpu(ctrl->wValue);
	u16				w_length = le16_to_cpu(ctrl->wLength);

	spin_lock (&dev->lock);
	dev->setup_abort = 0;
	if (dev->state == STATE_DEV_UNCONNECTED) {
		if (gadget_is_dualspeed(gadget)
				&& gadget->speed == USB_SPEED_HIGH
				&& dev->hs_config == NULL) {
			spin_unlock(&dev->lock);
			ERROR (dev, "no high speed config??\n");
			return -EINVAL;
		}

		dev->state = STATE_DEV_CONNECTED;

		INFO (dev, "connected\n");
		event = next_event (dev, GADGETFS_CONNECT);
		event->u.speed = gadget->speed;
		ep0_readable (dev);

	/* host may have given up waiting for response.  we can miss control
	 * requests handled lower down (device/endpoint status and features);
	 * then ep0_{read,write} will report the wrong status.  controller
	 * driver will have aborted pending i/o.
	 */
	} else if (dev->state == STATE_DEV_SETUP)
		dev->setup_abort = 1;

	req->buf = dev->rbuf;
	req->context = NULL;
	value = -EOPNOTSUPP;
	switch (ctrl->bRequest) {

	case USB_REQ_GET_DESCRIPTOR:
		if (ctrl->bRequestType != USB_DIR_IN)
			goto unrecognized;
		switch (w_value >> 8) {

		case USB_DT_DEVICE:
			value = min (w_length, (u16) sizeof *dev->dev);
			dev->dev->bMaxPacketSize0 = dev->gadget->ep0->maxpacket;
			req->buf = dev->dev;
			break;
		case USB_DT_DEVICE_QUALIFIER:
			if (!dev->hs_config)
				break;
			value = min (w_length, (u16)
				sizeof (struct usb_qualifier_descriptor));
			make_qualifier (dev);
			break;
		case USB_DT_OTHER_SPEED_CONFIG:
			// FALLTHROUGH
		case USB_DT_CONFIG:
			value = config_buf (dev,
					w_value >> 8,
					w_value & 0xff);
			if (value >= 0)
				value = min (w_length, (u16) value);
			break;
		case USB_DT_STRING:
			goto unrecognized;

		default:		// all others are errors
			break;
		}
		break;

	/* currently one config, two speeds */
	case USB_REQ_SET_CONFIGURATION:
		if (ctrl->bRequestType != 0)
			goto unrecognized;
		if (0 == (u8) w_value) {
			value = 0;
			dev->current_config = 0;
			usb_gadget_vbus_draw(gadget, 8 /* mA */ );
			// user mode expected to disable endpoints
		} else {
			u8	config, power;

			if (gadget_is_dualspeed(gadget)
					&& gadget->speed == USB_SPEED_HIGH) {
				config = dev->hs_config->bConfigurationValue;
				power = dev->hs_config->bMaxPower;
			} else {
				config = dev->config->bConfigurationValue;
				power = dev->config->bMaxPower;
			}

			if (config == (u8) w_value) {
				value = 0;
				dev->current_config = config;
				usb_gadget_vbus_draw(gadget, 2 * power);
			}
		}

		/* report SET_CONFIGURATION like any other control request,
		 * except that usermode may not stall this.  the next
		 * request mustn't be allowed to start until this finishes:
		 * endpoints and threads set up, etc.
		 *
		 * NOTE:  older PXA hardware (before PXA 255: without UDCCFR)
		 * has bad/racey automagic that prevents synchronizing here.
		 * even kernel mode drivers often miss them.
		 */
		if (value == 0) {
			INFO (dev, "configuration #%d\n", dev->current_config);
			usb_gadget_set_state(gadget, USB_STATE_CONFIGURED);
			if (dev->usermode_setup) {
				dev->setup_can_stall = 0;
				goto delegate;
			}
		}
		break;

#ifndef	CONFIG_USB_PXA25X
	/* PXA automagically handles this request too */
	case USB_REQ_GET_CONFIGURATION:
		if (ctrl->bRequestType != 0x80)
			goto unrecognized;
		*(u8 *)req->buf = dev->current_config;
		value = min (w_length, (u16) 1);
		break;
#endif

	default:
unrecognized:
		VDEBUG (dev, "%s req%02x.%02x v%04x i%04x l%d\n",
			dev->usermode_setup ? "delegate" : "fail",
			ctrl->bRequestType, ctrl->bRequest,
			w_value, le16_to_cpu(ctrl->wIndex), w_length);

		/* if there's an ep0 reader, don't stall */
		if (dev->usermode_setup) {
			dev->setup_can_stall = 1;
delegate:
			dev->setup_in = (ctrl->bRequestType & USB_DIR_IN)
						? 1 : 0;
			dev->setup_wLength = w_length;
			dev->setup_out_ready = 0;
			dev->setup_out_error = 0;
			value = 0;

			/* read DATA stage for OUT right away */
			if (unlikely (!dev->setup_in && w_length)) {
				value = setup_req (gadget->ep0, dev->req,
							w_length);
				if (value < 0)
					break;

				++dev->udc_usage;
				spin_unlock (&dev->lock);
				value = usb_ep_queue (gadget->ep0, dev->req,
							GFP_KERNEL);
				spin_lock (&dev->lock);
				--dev->udc_usage;
				if (value < 0) {
					clean_req (gadget->ep0, dev->req);
					break;
				}

				/* we can't currently stall these */
				dev->setup_can_stall = 0;
			}

			/* state changes when reader collects event */
			event = next_event (dev, GADGETFS_SETUP);
			event->u.setup = *ctrl;
			ep0_readable (dev);
			spin_unlock (&dev->lock);
			return 0;
		}
	}

	/* proceed with data transfer and status phases? */
	if (value >= 0 && dev->state != STATE_DEV_SETUP) {
		req->length = value;
		req->zero = value < w_length;

		++dev->udc_usage;
		spin_unlock (&dev->lock);
		value = usb_ep_queue (gadget->ep0, req, GFP_KERNEL);
		spin_lock(&dev->lock);
		--dev->udc_usage;
		spin_unlock(&dev->lock);
		if (value < 0) {
			DBG (dev, "ep_queue --> %d\n", value);
			req->status = 0;
		}
		return value;
	}

	/* device stalls when value < 0 */
	spin_unlock (&dev->lock);
	return value;
}

static void destroy_ep_files (struct dev_data *dev)
{
	DBG (dev, "%s %d\n", __func__, dev->state);

	/* dev->state must prevent interference */
	spin_lock_irq (&dev->lock);
	while (!list_empty(&dev->epfiles)) {
		struct ep_data	*ep;
		struct inode	*parent;
		struct dentry	*dentry;

		/* break link to FS */
		ep = list_first_entry (&dev->epfiles, struct ep_data, epfiles);
		list_del_init (&ep->epfiles);
		spin_unlock_irq (&dev->lock);

		dentry = ep->dentry;
		ep->dentry = NULL;
		parent = d_inode(dentry->d_parent);

		/* break link to controller */
		mutex_lock(&ep->lock);
		if (ep->state == STATE_EP_ENABLED)
			(void) usb_ep_disable (ep->ep);
		ep->state = STATE_EP_UNBOUND;
		usb_ep_free_request (ep->ep, ep->req);
		ep->ep = NULL;
		mutex_unlock(&ep->lock);

		wake_up (&ep->wait);
		put_ep (ep);

		/* break link to dcache */
		inode_lock(parent);
		d_delete (dentry);
		dput (dentry);
		inode_unlock(parent);

		spin_lock_irq (&dev->lock);
	}
	spin_unlock_irq (&dev->lock);
}


static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
		void *data, const struct file_operations *fops);

static int activate_ep_files (struct dev_data *dev)
{
	struct usb_ep	*ep;
	struct ep_data	*data;

	gadget_for_each_ep (ep, dev->gadget) {

		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			goto enomem0;
		data->state = STATE_EP_DISABLED;
		mutex_init(&data->lock);
		init_waitqueue_head (&data->wait);

		strncpy (data->name, ep->name, sizeof (data->name) - 1);
		refcount_set (&data->count, 1);
		data->dev = dev;
		get_dev (dev);

		data->ep = ep;
		ep->driver_data = data;

		data->req = usb_ep_alloc_request (ep, GFP_KERNEL);
		if (!data->req)
			goto enomem1;

		data->dentry = gadgetfs_create_file (dev->sb, data->name,
				data, &ep_io_operations);
		if (!data->dentry)
			goto enomem2;
		list_add_tail (&data->epfiles, &dev->epfiles);
	}
	return 0;

enomem2:
	usb_ep_free_request (ep, data->req);
enomem1:
	put_dev (dev);
	kfree (data);
enomem0:
	DBG (dev, "%s enomem\n", __func__);
	destroy_ep_files (dev);
	return -ENOMEM;
}

static void
gadgetfs_unbind (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);

	DBG (dev, "%s\n", __func__);

	spin_lock_irq (&dev->lock);
	dev->state = STATE_DEV_UNBOUND;
	while (dev->udc_usage > 0) {
		spin_unlock_irq(&dev->lock);
		usleep_range(1000, 2000);
		spin_lock_irq(&dev->lock);
	}
	spin_unlock_irq (&dev->lock);

	destroy_ep_files (dev);
	gadget->ep0->driver_data = NULL;
	set_gadget_data (gadget, NULL);

	/* we've already been disconnected ... no i/o is active */
	if (dev->req)
		usb_ep_free_request (gadget->ep0, dev->req);
	DBG (dev, "%s done\n", __func__);
	put_dev (dev);
}

static struct dev_data		*the_device;

static int gadgetfs_bind(struct usb_gadget *gadget,
		struct usb_gadget_driver *driver)
{
	struct dev_data		*dev = the_device;

	if (!dev)
		return -ESRCH;
	if (0 != strcmp (CHIP, gadget->name)) {
		pr_err("%s expected %s controller not %s\n",
			shortname, CHIP, gadget->name);
		return -ENODEV;
	}

	set_gadget_data (gadget, dev);
	dev->gadget = gadget;
	gadget->ep0->driver_data = dev;

	/* preallocate control response and buffer */
	dev->req = usb_ep_alloc_request (gadget->ep0, GFP_KERNEL);
	if (!dev->req)
		goto enomem;
	dev->req->context = NULL;
	dev->req->complete = epio_complete;

	if (activate_ep_files (dev) < 0)
		goto enomem;

	INFO (dev, "bound to %s driver\n", gadget->name);
	spin_lock_irq(&dev->lock);
	dev->state = STATE_DEV_UNCONNECTED;
	spin_unlock_irq(&dev->lock);
	get_dev (dev);
	return 0;

enomem:
	gadgetfs_unbind (gadget);
	return -ENOMEM;
}

static void
gadgetfs_disconnect (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);
	unsigned long		flags;

	spin_lock_irqsave (&dev->lock, flags);
	if (dev->state == STATE_DEV_UNCONNECTED)
		goto exit;
	dev->state = STATE_DEV_UNCONNECTED;

	INFO (dev, "disconnected\n");
	next_event (dev, GADGETFS_DISCONNECT);
	ep0_readable (dev);
exit:
	spin_unlock_irqrestore (&dev->lock, flags);
}

static void
gadgetfs_suspend (struct usb_gadget *gadget)
{
	struct dev_data		*dev = get_gadget_data (gadget);
	unsigned long		flags;

	INFO (dev, "suspended from state %d\n", dev->state);
	spin_lock_irqsave(&dev->lock, flags);
	switch (dev->state) {
	case STATE_DEV_SETUP:		// VERY odd... host died??
	case STATE_DEV_CONNECTED:
	case STATE_DEV_UNCONNECTED:
		next_event (dev, GADGETFS_SUSPEND);
		ep0_readable (dev);
		/* FALLTHROUGH */
	default:
		break;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
}

static struct usb_gadget_driver gadgetfs_driver = {
	.function	= (char *) driver_desc,
	.bind		= gadgetfs_bind,
	.unbind		= gadgetfs_unbind,
	.setup		= gadgetfs_setup,
	.reset		= gadgetfs_disconnect,
	.disconnect	= gadgetfs_disconnect,
	.suspend	= gadgetfs_suspend,

	.driver	= {
		.name		= (char *) shortname,
	},
};

/*----------------------------------------------------------------------*/
/* DEVICE INITIALIZATION
 *
 *     fd = open ("/dev/gadget/$CHIP", O_RDWR)
 *     status = write (fd, descriptors, sizeof descriptors)
 *
 * That write establishes the device configuration, so the kernel can
 * bind to the controller ... guaranteeing it can handle enumeration
 * at all necessary speeds.  Descriptor order is:
 *
 * . message tag (u32, host order) ... for now, must be zero; it
 *   would change to support features like multi-config devices
 * . full/low speed config ... all wTotalLength bytes (with interface,
 *   class, altsetting, endpoint, and other descriptors)
 * . high speed config ... all descriptors, for high speed operation;
 *   this one's optional except for high-speed hardware
 * . device descriptor
 *
 * Endpoints are not yet enabled.  Drivers must wait until device
 * configuration and interface altsetting changes create
 * the need to configure (or unconfigure) them.
 *
 * After initialization, the device stays active for as long as that
 * $CHIP file is open.  Events must then be read from that descriptor,
 * such as configuration notifications.
 */
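/* As a concrete illustration of that layout (a hedged sketch; the
 * fs_config/hs_config blobs, their lengths, and device_desc come from
 * the user mode driver itself):
 *
 *	__u32 tag = 0;				// host byte order
 *	char buf[4096], *cp = buf;
 *
 *	memcpy(cp, &tag, 4);			cp += 4;
 *	memcpy(cp, fs_config, fs_total);	cp += fs_total;	// wTotalLength bytes
 *	memcpy(cp, hs_config, hs_total);	cp += hs_total;	// optional
 *	memcpy(cp, &device_desc, USB_DT_DEVICE_SIZE); cp += USB_DT_DEVICE_SIZE;
 *	write(ep0, buf, cp - buf);
 *
 * dev_config() below parses exactly this layout and then binds to the
 * controller driver.
 */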

static int is_valid_config(struct usb_config_descriptor *config,
		unsigned int total)
{
	return config->bDescriptorType == USB_DT_CONFIG
		&& config->bLength == USB_DT_CONFIG_SIZE
		&& total >= USB_DT_CONFIG_SIZE
		&& config->bConfigurationValue != 0
		&& (config->bmAttributes & USB_CONFIG_ATT_ONE) != 0
		&& (config->bmAttributes & USB_CONFIG_ATT_WAKEUP) == 0;
	/* FIXME if gadget->is_otg, _must_ include an otg descriptor */
	/* FIXME check lengths: walk to end */
}

static ssize_t
dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
{
	struct dev_data		*dev = fd->private_data;
	ssize_t			value = len, length = len;
	unsigned		total;
	u32			tag;
	char			*kbuf;

	spin_lock_irq(&dev->lock);
	if (dev->state > STATE_DEV_OPENED) {
		value = ep0_write(fd, buf, len, ptr);
		spin_unlock_irq(&dev->lock);
		return value;
	}
	spin_unlock_irq(&dev->lock);

	if ((len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) ||
	    (len > PAGE_SIZE * 4))
		return -EINVAL;

	/* we might need to change message format someday */
	if (copy_from_user (&tag, buf, 4))
		return -EFAULT;
	if (tag != 0)
		return -EINVAL;
	buf += 4;
	length -= 4;

	kbuf = memdup_user(buf, length);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

	spin_lock_irq (&dev->lock);
	value = -EINVAL;
	if (dev->buf) {
		kfree(kbuf);
		goto fail;
	}
	dev->buf = kbuf;

	/* full or low speed config */
	dev->config = (void *) kbuf;
	total = le16_to_cpu(dev->config->wTotalLength);
	if (!is_valid_config(dev->config, total) ||
			total > length - USB_DT_DEVICE_SIZE)
		goto fail;
	kbuf += total;
	length -= total;

	/* optional high speed config */
	if (kbuf [1] == USB_DT_CONFIG) {
		dev->hs_config = (void *) kbuf;
		total = le16_to_cpu(dev->hs_config->wTotalLength);
		if (!is_valid_config(dev->hs_config, total) ||
				total > length - USB_DT_DEVICE_SIZE)
			goto fail;
		kbuf += total;
		length -= total;
	} else {
		dev->hs_config = NULL;
	}

	/* could support multiple configs, using another encoding! */

	/* device descriptor (tweaked for paranoia) */
	if (length != USB_DT_DEVICE_SIZE)
		goto fail;
	dev->dev = (void *)kbuf;
	if (dev->dev->bLength != USB_DT_DEVICE_SIZE
			|| dev->dev->bDescriptorType != USB_DT_DEVICE
			|| dev->dev->bNumConfigurations != 1)
		goto fail;
	dev->dev->bcdUSB = cpu_to_le16 (0x0200);

	/* triggers gadgetfs_bind(); then we can enumerate. */
	spin_unlock_irq (&dev->lock);
	if (dev->hs_config)
		gadgetfs_driver.max_speed = USB_SPEED_HIGH;
	else
		gadgetfs_driver.max_speed = USB_SPEED_FULL;

	value = usb_gadget_probe_driver(&gadgetfs_driver);
	if (value != 0) {
		kfree (dev->buf);
		dev->buf = NULL;
	} else {
		/* at this point "good" hardware has for the first time
		 * let the host see us.  alternatively, if users
		 * unplug/replug that will clear all the error state.
		 *
		 * note:  everything running before here was guaranteed
		 * to choke driver model style diagnostics.  from here
		 * on, they can work ... except in cleanup paths that
		 * kick in after the ep0 descriptor is closed.
		 */
		value = len;
		dev->gadget_registered = true;
	}
	return value;

fail:
	spin_unlock_irq (&dev->lock);
	pr_debug ("%s: %s fail %zd, %p\n", shortname, __func__, value, dev);
	kfree (dev->buf);
	dev->buf = NULL;
	return value;
}

static int
dev_open (struct inode *inode, struct file *fd)
{
	struct dev_data		*dev = inode->i_private;
	int			value = -EBUSY;

	spin_lock_irq(&dev->lock);
	if (dev->state == STATE_DEV_DISABLED) {
		dev->ev_next = 0;
		dev->state = STATE_DEV_OPENED;
		fd->private_data = dev;
		get_dev (dev);
		value = 0;
	}
	spin_unlock_irq(&dev->lock);
	return value;
}

static const struct file_operations ep0_operations = {
	.llseek =	no_llseek,

	.open =		dev_open,
	.read =		ep0_read,
	.write =	dev_config,
	.fasync =	ep0_fasync,
	.poll =		ep0_poll,
	.unlocked_ioctl = dev_ioctl,
	.release =	dev_release,
};

/*----------------------------------------------------------------------*/

/* FILESYSTEM AND SUPERBLOCK OPERATIONS
 *
 * Mounting the filesystem creates a controller file, used first for
 * device configuration then later for event monitoring.
 */

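/* Before any of the above, the filesystem must be mounted, typically
 * from a startup script ("mount -t gadgetfs gadgetfs /dev/gadget") or,
 * as a hedged sketch, directly with mount(2):
 *
 *	mkdir("/dev/gadget", 0700);
 *	mount("gadgetfs", "/dev/gadget", "gadgetfs", 0, NULL);
 */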

/* FIXME PAM etc could set this security policy without mount options
 * if epfiles inherited ownership and permissions from ep0 ...
 */

static unsigned default_uid;
static unsigned default_gid;
static unsigned default_perm = S_IRUSR | S_IWUSR;

module_param (default_uid, uint, 0644);
module_param (default_gid, uint, 0644);
module_param (default_perm, uint, 0644);


static struct inode *
gadgetfs_make_inode (struct super_block *sb,
		void *data, const struct file_operations *fops,
		int mode)
{
	struct inode *inode = new_inode (sb);

	if (inode) {
		inode->i_ino = get_next_ino();
		inode->i_mode = mode;
		inode->i_uid = make_kuid(&init_user_ns, default_uid);
		inode->i_gid = make_kgid(&init_user_ns, default_gid);
		inode->i_atime = inode->i_mtime = inode->i_ctime
				= current_time(inode);
		inode->i_private = data;
		inode->i_fop = fops;
	}
	return inode;
}

/* creates in fs root directory, so non-renamable and non-linkable.
 * so inode and dentry are paired, until device reconfig.
 */
static struct dentry *
gadgetfs_create_file (struct super_block *sb, char const *name,
		void *data, const struct file_operations *fops)
{
	struct dentry	*dentry;
	struct inode	*inode;

	dentry = d_alloc_name(sb->s_root, name);
	if (!dentry)
		return NULL;

	inode = gadgetfs_make_inode (sb, data, fops,
			S_IFREG | (default_perm & S_IRWXUGO));
	if (!inode) {
		dput(dentry);
		return NULL;
	}
	d_add (dentry, inode);
	return dentry;
}

static const struct super_operations gadget_fs_operations = {
	.statfs =	simple_statfs,
	.drop_inode =	generic_delete_inode,
};

static int
gadgetfs_fill_super (struct super_block *sb, void *opts, int silent)
{
	struct inode	*inode;
	struct dev_data	*dev;

	if (the_device)
		return -ESRCH;

	CHIP = usb_get_gadget_udc_name();
	if (!CHIP)
		return -ENODEV;

	/* superblock */
	sb->s_blocksize = PAGE_SIZE;
	sb->s_blocksize_bits = PAGE_SHIFT;
	sb->s_magic = GADGETFS_MAGIC;
	sb->s_op = &gadget_fs_operations;
	sb->s_time_gran = 1;

	/* root inode */
	inode = gadgetfs_make_inode (sb,
			NULL, &simple_dir_operations,
			S_IFDIR | S_IRUGO | S_IXUGO);
	if (!inode)
		goto Enomem;
	inode->i_op = &simple_dir_inode_operations;
	if (!(sb->s_root = d_make_root (inode)))
		goto Enomem;

	/* the ep0 file is named after the controller we expect;
	 * user mode code can use it for sanity checks, like we do.
	 */
	dev = dev_new ();
	if (!dev)
		goto Enomem;

	dev->sb = sb;
	dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations);
	if (!dev->dentry) {
		put_dev(dev);
		goto Enomem;
	}

	/* other endpoint files are available after hardware setup,
	 * from binding to a controller.
	 */
	the_device = dev;
	return 0;

Enomem:
	return -ENOMEM;
}

/* "mount -t gadgetfs path /dev/gadget" ends up here */
static struct dentry *
gadgetfs_mount (struct file_system_type *t, int flags,
		const char *path, void *opts)
{
	return mount_single (t, flags, opts, gadgetfs_fill_super);
}

static void
gadgetfs_kill_sb (struct super_block *sb)
{
	kill_litter_super (sb);
	if (the_device) {
		put_dev (the_device);
		the_device = NULL;
	}
	kfree(CHIP);
	CHIP = NULL;
}

/*----------------------------------------------------------------------*/

static struct file_system_type gadgetfs_type = {
	.owner		= THIS_MODULE,
	.name		= shortname,
	.mount		= gadgetfs_mount,
	.kill_sb	= gadgetfs_kill_sb,
};
MODULE_ALIAS_FS("gadgetfs");

/*----------------------------------------------------------------------*/

static int __init init (void)
{
	int status;

	status = register_filesystem (&gadgetfs_type);
	if (status == 0)
		pr_info ("%s: %s, version " DRIVER_VERSION "\n",
			shortname, driver_desc);
	return status;
}
module_init (init);

static void __exit cleanup (void)
{
	pr_debug ("unregister %s\n", shortname);
	unregister_filesystem (&gadgetfs_type);
}
module_exit (cleanup);