1 /* 2 * Toshiba TC86C001 ("Goku-S") USB Device Controller driver 3 * 4 * Copyright (C) 2000-2002 Lineo 5 * by Stuart Lynne, Tom Rushworth, and Bruce Balden 6 * Copyright (C) 2002 Toshiba Corporation 7 * Copyright (C) 2003 MontaVista Software (source@mvista.com) 8 * 9 * This file is licensed under the terms of the GNU General Public 10 * License version 2. This program is licensed "as is" without any 11 * warranty of any kind, whether express or implied. 12 */ 13 14 /* 15 * This device has ep0 and three semi-configurable bulk/interrupt endpoints. 16 * 17 * - Endpoint numbering is fixed: ep{1,2,3}-bulk 18 * - Gadget drivers can choose ep maxpacket (8/16/32/64) 19 * - Gadget drivers can choose direction (IN, OUT) 20 * - DMA works with ep1 (OUT transfers) and ep2 (IN transfers). 21 */ 22 23 // #define VERBOSE /* extra debug messages (success too) */ 24 // #define USB_TRACE /* packet-level success messages */ 25 26 #include <linux/kernel.h> 27 #include <linux/module.h> 28 #include <linux/pci.h> 29 #include <linux/delay.h> 30 #include <linux/ioport.h> 31 #include <linux/slab.h> 32 #include <linux/errno.h> 33 #include <linux/timer.h> 34 #include <linux/list.h> 35 #include <linux/interrupt.h> 36 #include <linux/proc_fs.h> 37 #include <linux/seq_file.h> 38 #include <linux/device.h> 39 #include <linux/usb/ch9.h> 40 #include <linux/usb/gadget.h> 41 #include <linux/prefetch.h> 42 43 #include <asm/byteorder.h> 44 #include <asm/io.h> 45 #include <asm/irq.h> 46 #include <asm/unaligned.h> 47 48 49 #include "goku_udc.h" 50 51 #define DRIVER_DESC "TC86C001 USB Device Controller" 52 #define DRIVER_VERSION "30-Oct 2003" 53 54 static const char driver_name [] = "goku_udc"; 55 static const char driver_desc [] = DRIVER_DESC; 56 57 MODULE_AUTHOR("source@mvista.com"); 58 MODULE_DESCRIPTION(DRIVER_DESC); 59 MODULE_LICENSE("GPL"); 60 61 62 /* 63 * IN dma behaves ok under testing, though the IN-dma abort paths don't 64 * seem to behave quite as expected. Used by default. 65 * 66 * OUT dma documents design problems handling the common "short packet" 67 * transfer termination policy; it couldn't be enabled by default, even 68 * if the OUT-dma abort problems had a resolution. 69 */ 70 static unsigned use_dma = 1; 71 72 #if 0 73 //#include <linux/moduleparam.h> 74 /* "modprobe goku_udc use_dma=1" etc 75 * 0 to disable dma 76 * 1 to use IN dma only (normal operation) 77 * 2 to use IN and OUT dma 78 */ 79 module_param(use_dma, uint, S_IRUGO); 80 #endif 81 82 /*-------------------------------------------------------------------------*/ 83 84 static void nuke(struct goku_ep *, int status); 85 86 static inline void 87 command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum) 88 { 89 writel(COMMAND_EP(epnum) | command, ®s->Command); 90 udelay(300); 91 } 92 93 static int 94 goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) 95 { 96 struct goku_udc *dev; 97 struct goku_ep *ep; 98 u32 mode; 99 u16 max; 100 unsigned long flags; 101 102 ep = container_of(_ep, struct goku_ep, ep); 103 if (!_ep || !desc 104 || desc->bDescriptorType != USB_DT_ENDPOINT) 105 return -EINVAL; 106 dev = ep->dev; 107 if (ep == &dev->ep[0]) 108 return -EINVAL; 109 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN) 110 return -ESHUTDOWN; 111 if (ep->num != usb_endpoint_num(desc)) 112 return -EINVAL; 113 114 switch (usb_endpoint_type(desc)) { 115 case USB_ENDPOINT_XFER_BULK: 116 case USB_ENDPOINT_XFER_INT: 117 break; 118 default: 119 return -EINVAL; 120 } 121 122 if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK) 123 != EPxSTATUS_EP_INVALID) 124 return -EBUSY; 125 126 /* enabling the no-toggle interrupt mode would need an api hook */ 127 mode = 0; 128 max = get_unaligned_le16(&desc->wMaxPacketSize); 129 switch (max) { 130 case 64: mode++; 131 case 32: mode++; 132 case 16: mode++; 133 case 8: mode <<= 3; 134 break; 135 default: 136 return -EINVAL; 137 } 138 mode |= 2 << 1; /* bulk, or intr-with-toggle */ 139 140 /* ep1/ep2 dma direction is chosen early; it works in the other 141 * direction, with pio. be cautious with out-dma. 142 */ 143 ep->is_in = usb_endpoint_dir_in(desc); 144 if (ep->is_in) { 145 mode |= 1; 146 ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT); 147 } else { 148 ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT); 149 if (ep->dma) 150 DBG(dev, "%s out-dma hides short packets\n", 151 ep->ep.name); 152 } 153 154 spin_lock_irqsave(&ep->dev->lock, flags); 155 156 /* ep1 and ep2 can do double buffering and/or dma */ 157 if (ep->num < 3) { 158 struct goku_udc_regs __iomem *regs = ep->dev->regs; 159 u32 tmp; 160 161 /* double buffer except (for now) with pio in */ 162 tmp = ((ep->dma || !ep->is_in) 163 ? 0x10 /* double buffered */ 164 : 0x11 /* single buffer */ 165 ) << ep->num; 166 tmp |= readl(®s->EPxSingle); 167 writel(tmp, ®s->EPxSingle); 168 169 tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num; 170 tmp |= readl(®s->EPxBCS); 171 writel(tmp, ®s->EPxBCS); 172 } 173 writel(mode, ep->reg_mode); 174 command(ep->dev->regs, COMMAND_RESET, ep->num); 175 ep->ep.maxpacket = max; 176 ep->stopped = 0; 177 ep->ep.desc = desc; 178 spin_unlock_irqrestore(&ep->dev->lock, flags); 179 180 DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name, 181 ep->is_in ? "IN" : "OUT", 182 ep->dma ? "dma" : "pio", 183 max); 184 185 return 0; 186 } 187 188 static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep) 189 { 190 struct goku_udc *dev = ep->dev; 191 192 if (regs) { 193 command(regs, COMMAND_INVALID, ep->num); 194 if (ep->num) { 195 if (ep->num == UDC_MSTWR_ENDPOINT) 196 dev->int_enable &= ~(INT_MSTWREND 197 |INT_MSTWRTMOUT); 198 else if (ep->num == UDC_MSTRD_ENDPOINT) 199 dev->int_enable &= ~INT_MSTRDEND; 200 dev->int_enable &= ~INT_EPxDATASET (ep->num); 201 } else 202 dev->int_enable &= ~INT_EP0; 203 writel(dev->int_enable, ®s->int_enable); 204 readl(®s->int_enable); 205 if (ep->num < 3) { 206 struct goku_udc_regs __iomem *r = ep->dev->regs; 207 u32 tmp; 208 209 tmp = readl(&r->EPxSingle); 210 tmp &= ~(0x11 << ep->num); 211 writel(tmp, &r->EPxSingle); 212 213 tmp = readl(&r->EPxBCS); 214 tmp &= ~(0x11 << ep->num); 215 writel(tmp, &r->EPxBCS); 216 } 217 /* reset dma in case we're still using it */ 218 if (ep->dma) { 219 u32 master; 220 221 master = readl(®s->dma_master) & MST_RW_BITS; 222 if (ep->num == UDC_MSTWR_ENDPOINT) { 223 master &= ~MST_W_BITS; 224 master |= MST_WR_RESET; 225 } else { 226 master &= ~MST_R_BITS; 227 master |= MST_RD_RESET; 228 } 229 writel(master, ®s->dma_master); 230 } 231 } 232 233 usb_ep_set_maxpacket_limit(&ep->ep, MAX_FIFO_SIZE); 234 ep->ep.desc = NULL; 235 ep->stopped = 1; 236 ep->irqs = 0; 237 ep->dma = 0; 238 } 239 240 static int goku_ep_disable(struct usb_ep *_ep) 241 { 242 struct goku_ep *ep; 243 struct goku_udc *dev; 244 unsigned long flags; 245 246 ep = container_of(_ep, struct goku_ep, ep); 247 if (!_ep || !ep->ep.desc) 248 return -ENODEV; 249 dev = ep->dev; 250 if (dev->ep0state == EP0_SUSPEND) 251 return -EBUSY; 252 253 VDBG(dev, "disable %s\n", _ep->name); 254 255 spin_lock_irqsave(&dev->lock, flags); 256 nuke(ep, -ESHUTDOWN); 257 ep_reset(dev->regs, ep); 258 spin_unlock_irqrestore(&dev->lock, flags); 259 260 return 0; 261 } 262 263 /*-------------------------------------------------------------------------*/ 264 265 static struct usb_request * 266 goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) 267 { 268 struct goku_request *req; 269 270 if (!_ep) 271 return NULL; 272 req = kzalloc(sizeof *req, gfp_flags); 273 if (!req) 274 return NULL; 275 276 INIT_LIST_HEAD(&req->queue); 277 return &req->req; 278 } 279 280 static void 281 goku_free_request(struct usb_ep *_ep, struct usb_request *_req) 282 { 283 struct goku_request *req; 284 285 if (!_ep || !_req) 286 return; 287 288 req = container_of(_req, struct goku_request, req); 289 WARN_ON(!list_empty(&req->queue)); 290 kfree(req); 291 } 292 293 /*-------------------------------------------------------------------------*/ 294 295 static void 296 done(struct goku_ep *ep, struct goku_request *req, int status) 297 { 298 struct goku_udc *dev; 299 unsigned stopped = ep->stopped; 300 301 list_del_init(&req->queue); 302 303 if (likely(req->req.status == -EINPROGRESS)) 304 req->req.status = status; 305 else 306 status = req->req.status; 307 308 dev = ep->dev; 309 310 if (ep->dma) 311 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); 312 313 #ifndef USB_TRACE 314 if (status && status != -ESHUTDOWN) 315 #endif 316 VDBG(dev, "complete %s req %p stat %d len %u/%u\n", 317 ep->ep.name, &req->req, status, 318 req->req.actual, req->req.length); 319 320 /* don't modify queue heads during completion callback */ 321 ep->stopped = 1; 322 spin_unlock(&dev->lock); 323 usb_gadget_giveback_request(&ep->ep, &req->req); 324 spin_lock(&dev->lock); 325 ep->stopped = stopped; 326 } 327 328 /*-------------------------------------------------------------------------*/ 329 330 static inline int 331 write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max) 332 { 333 unsigned length, count; 334 335 length = min(req->req.length - req->req.actual, max); 336 req->req.actual += length; 337 338 count = length; 339 while (likely(count--)) 340 writel(*buf++, fifo); 341 return length; 342 } 343 344 // return: 0 = still running, 1 = completed, negative = errno 345 static int write_fifo(struct goku_ep *ep, struct goku_request *req) 346 { 347 struct goku_udc *dev = ep->dev; 348 u32 tmp; 349 u8 *buf; 350 unsigned count; 351 int is_last; 352 353 tmp = readl(&dev->regs->DataSet); 354 buf = req->req.buf + req->req.actual; 355 prefetch(buf); 356 357 dev = ep->dev; 358 if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN)) 359 return -EL2HLT; 360 361 /* NOTE: just single-buffered PIO-IN for now. */ 362 if (unlikely((tmp & DATASET_A(ep->num)) != 0)) 363 return 0; 364 365 /* clear our "packet available" irq */ 366 if (ep->num != 0) 367 writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status); 368 369 count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket); 370 371 /* last packet often short (sometimes a zlp, especially on ep0) */ 372 if (unlikely(count != ep->ep.maxpacket)) { 373 writel(~(1<<ep->num), &dev->regs->EOP); 374 if (ep->num == 0) { 375 dev->ep[0].stopped = 1; 376 dev->ep0state = EP0_STATUS; 377 } 378 is_last = 1; 379 } else { 380 if (likely(req->req.length != req->req.actual) 381 || req->req.zero) 382 is_last = 0; 383 else 384 is_last = 1; 385 } 386 #if 0 /* printk seemed to trash is_last...*/ 387 //#ifdef USB_TRACE 388 VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n", 389 ep->ep.name, count, is_last ? "/last" : "", 390 req->req.length - req->req.actual, req); 391 #endif 392 393 /* requests complete when all IN data is in the FIFO, 394 * or sometimes later, if a zlp was needed. 395 */ 396 if (is_last) { 397 done(ep, req, 0); 398 return 1; 399 } 400 401 return 0; 402 } 403 404 static int read_fifo(struct goku_ep *ep, struct goku_request *req) 405 { 406 struct goku_udc_regs __iomem *regs; 407 u32 size, set; 408 u8 *buf; 409 unsigned bufferspace, is_short, dbuff; 410 411 regs = ep->dev->regs; 412 top: 413 buf = req->req.buf + req->req.actual; 414 prefetchw(buf); 415 416 if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT)) 417 return -EL2HLT; 418 419 dbuff = (ep->num == 1 || ep->num == 2); 420 do { 421 /* ack dataset irq matching the status we'll handle */ 422 if (ep->num != 0) 423 writel(~INT_EPxDATASET(ep->num), ®s->int_status); 424 425 set = readl(®s->DataSet) & DATASET_AB(ep->num); 426 size = readl(®s->EPxSizeLA[ep->num]); 427 bufferspace = req->req.length - req->req.actual; 428 429 /* usually do nothing without an OUT packet */ 430 if (likely(ep->num != 0 || bufferspace != 0)) { 431 if (unlikely(set == 0)) 432 break; 433 /* use ep1/ep2 double-buffering for OUT */ 434 if (!(size & PACKET_ACTIVE)) 435 size = readl(®s->EPxSizeLB[ep->num]); 436 if (!(size & PACKET_ACTIVE)) /* "can't happen" */ 437 break; 438 size &= DATASIZE; /* EPxSizeH == 0 */ 439 440 /* ep0out no-out-data case for set_config, etc */ 441 } else 442 size = 0; 443 444 /* read all bytes from this packet */ 445 req->req.actual += size; 446 is_short = (size < ep->ep.maxpacket); 447 #ifdef USB_TRACE 448 VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n", 449 ep->ep.name, size, is_short ? "/S" : "", 450 req, req->req.actual, req->req.length); 451 #endif 452 while (likely(size-- != 0)) { 453 u8 byte = (u8) readl(ep->reg_fifo); 454 455 if (unlikely(bufferspace == 0)) { 456 /* this happens when the driver's buffer 457 * is smaller than what the host sent. 458 * discard the extra data in this packet. 459 */ 460 if (req->req.status != -EOVERFLOW) 461 DBG(ep->dev, "%s overflow %u\n", 462 ep->ep.name, size); 463 req->req.status = -EOVERFLOW; 464 } else { 465 *buf++ = byte; 466 bufferspace--; 467 } 468 } 469 470 /* completion */ 471 if (unlikely(is_short || req->req.actual == req->req.length)) { 472 if (unlikely(ep->num == 0)) { 473 /* non-control endpoints now usable? */ 474 if (ep->dev->req_config) 475 writel(ep->dev->configured 476 ? USBSTATE_CONFIGURED 477 : 0, 478 ®s->UsbState); 479 /* ep0out status stage */ 480 writel(~(1<<0), ®s->EOP); 481 ep->stopped = 1; 482 ep->dev->ep0state = EP0_STATUS; 483 } 484 done(ep, req, 0); 485 486 /* empty the second buffer asap */ 487 if (dbuff && !list_empty(&ep->queue)) { 488 req = list_entry(ep->queue.next, 489 struct goku_request, queue); 490 goto top; 491 } 492 return 1; 493 } 494 } while (dbuff); 495 return 0; 496 } 497 498 static inline void 499 pio_irq_enable(struct goku_udc *dev, 500 struct goku_udc_regs __iomem *regs, int epnum) 501 { 502 dev->int_enable |= INT_EPxDATASET (epnum); 503 writel(dev->int_enable, ®s->int_enable); 504 /* write may still be posted */ 505 } 506 507 static inline void 508 pio_irq_disable(struct goku_udc *dev, 509 struct goku_udc_regs __iomem *regs, int epnum) 510 { 511 dev->int_enable &= ~INT_EPxDATASET (epnum); 512 writel(dev->int_enable, ®s->int_enable); 513 /* write may still be posted */ 514 } 515 516 static inline void 517 pio_advance(struct goku_ep *ep) 518 { 519 struct goku_request *req; 520 521 if (unlikely(list_empty (&ep->queue))) 522 return; 523 req = list_entry(ep->queue.next, struct goku_request, queue); 524 (ep->is_in ? write_fifo : read_fifo)(ep, req); 525 } 526 527 528 /*-------------------------------------------------------------------------*/ 529 530 // return: 0 = q running, 1 = q stopped, negative = errno 531 static int start_dma(struct goku_ep *ep, struct goku_request *req) 532 { 533 struct goku_udc_regs __iomem *regs = ep->dev->regs; 534 u32 master; 535 u32 start = req->req.dma; 536 u32 end = start + req->req.length - 1; 537 538 master = readl(®s->dma_master) & MST_RW_BITS; 539 540 /* re-init the bits affecting IN dma; careful with zlps */ 541 if (likely(ep->is_in)) { 542 if (unlikely(master & MST_RD_ENA)) { 543 DBG (ep->dev, "start, IN active dma %03x!!\n", 544 master); 545 // return -EL2HLT; 546 } 547 writel(end, ®s->in_dma_end); 548 writel(start, ®s->in_dma_start); 549 550 master &= ~MST_R_BITS; 551 if (unlikely(req->req.length == 0)) 552 master = MST_RD_ENA | MST_RD_EOPB; 553 else if ((req->req.length % ep->ep.maxpacket) != 0 554 || req->req.zero) 555 master = MST_RD_ENA | MST_EOPB_ENA; 556 else 557 master = MST_RD_ENA | MST_EOPB_DIS; 558 559 ep->dev->int_enable |= INT_MSTRDEND; 560 561 /* Goku DMA-OUT merges short packets, which plays poorly with 562 * protocols where short packets mark the transfer boundaries. 563 * The chip supports a nonstandard policy with INT_MSTWRTMOUT, 564 * ending transfers after 3 SOFs; we don't turn it on. 565 */ 566 } else { 567 if (unlikely(master & MST_WR_ENA)) { 568 DBG (ep->dev, "start, OUT active dma %03x!!\n", 569 master); 570 // return -EL2HLT; 571 } 572 writel(end, ®s->out_dma_end); 573 writel(start, ®s->out_dma_start); 574 575 master &= ~MST_W_BITS; 576 master |= MST_WR_ENA | MST_TIMEOUT_DIS; 577 578 ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT; 579 } 580 581 writel(master, ®s->dma_master); 582 writel(ep->dev->int_enable, ®s->int_enable); 583 return 0; 584 } 585 586 static void dma_advance(struct goku_udc *dev, struct goku_ep *ep) 587 { 588 struct goku_request *req; 589 struct goku_udc_regs __iomem *regs = ep->dev->regs; 590 u32 master; 591 592 master = readl(®s->dma_master); 593 594 if (unlikely(list_empty(&ep->queue))) { 595 stop: 596 if (ep->is_in) 597 dev->int_enable &= ~INT_MSTRDEND; 598 else 599 dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT); 600 writel(dev->int_enable, ®s->int_enable); 601 return; 602 } 603 req = list_entry(ep->queue.next, struct goku_request, queue); 604 605 /* normal hw dma completion (not abort) */ 606 if (likely(ep->is_in)) { 607 if (unlikely(master & MST_RD_ENA)) 608 return; 609 req->req.actual = readl(®s->in_dma_current); 610 } else { 611 if (unlikely(master & MST_WR_ENA)) 612 return; 613 614 /* hardware merges short packets, and also hides packet 615 * overruns. a partial packet MAY be in the fifo here. 616 */ 617 req->req.actual = readl(®s->out_dma_current); 618 } 619 req->req.actual -= req->req.dma; 620 req->req.actual++; 621 622 #ifdef USB_TRACE 623 VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n", 624 ep->ep.name, ep->is_in ? "IN" : "OUT", 625 req->req.actual, req->req.length, req); 626 #endif 627 done(ep, req, 0); 628 if (list_empty(&ep->queue)) 629 goto stop; 630 req = list_entry(ep->queue.next, struct goku_request, queue); 631 (void) start_dma(ep, req); 632 } 633 634 static void abort_dma(struct goku_ep *ep, int status) 635 { 636 struct goku_udc_regs __iomem *regs = ep->dev->regs; 637 struct goku_request *req; 638 u32 curr, master; 639 640 /* NAK future host requests, hoping the implicit delay lets the 641 * dma engine finish reading (or writing) its latest packet and 642 * empty the dma buffer (up to 16 bytes). 643 * 644 * This avoids needing to clean up a partial packet in the fifo; 645 * we can't do that for IN without side effects to HALT and TOGGLE. 646 */ 647 command(regs, COMMAND_FIFO_DISABLE, ep->num); 648 req = list_entry(ep->queue.next, struct goku_request, queue); 649 master = readl(®s->dma_master) & MST_RW_BITS; 650 651 /* FIXME using these resets isn't usably documented. this may 652 * not work unless it's followed by disabling the endpoint. 653 * 654 * FIXME the OUT reset path doesn't even behave consistently. 655 */ 656 if (ep->is_in) { 657 if (unlikely((readl(®s->dma_master) & MST_RD_ENA) == 0)) 658 goto finished; 659 curr = readl(®s->in_dma_current); 660 661 writel(curr, ®s->in_dma_end); 662 writel(curr, ®s->in_dma_start); 663 664 master &= ~MST_R_BITS; 665 master |= MST_RD_RESET; 666 writel(master, ®s->dma_master); 667 668 if (readl(®s->dma_master) & MST_RD_ENA) 669 DBG(ep->dev, "IN dma active after reset!\n"); 670 671 } else { 672 if (unlikely((readl(®s->dma_master) & MST_WR_ENA) == 0)) 673 goto finished; 674 curr = readl(®s->out_dma_current); 675 676 writel(curr, ®s->out_dma_end); 677 writel(curr, ®s->out_dma_start); 678 679 master &= ~MST_W_BITS; 680 master |= MST_WR_RESET; 681 writel(master, ®s->dma_master); 682 683 if (readl(®s->dma_master) & MST_WR_ENA) 684 DBG(ep->dev, "OUT dma active after reset!\n"); 685 } 686 req->req.actual = (curr - req->req.dma) + 1; 687 req->req.status = status; 688 689 VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name, 690 ep->is_in ? "IN" : "OUT", 691 req->req.actual, req->req.length); 692 693 command(regs, COMMAND_FIFO_ENABLE, ep->num); 694 695 return; 696 697 finished: 698 /* dma already completed; no abort needed */ 699 command(regs, COMMAND_FIFO_ENABLE, ep->num); 700 req->req.actual = req->req.length; 701 req->req.status = 0; 702 } 703 704 /*-------------------------------------------------------------------------*/ 705 706 static int 707 goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) 708 { 709 struct goku_request *req; 710 struct goku_ep *ep; 711 struct goku_udc *dev; 712 unsigned long flags; 713 int status; 714 715 /* always require a cpu-view buffer so pio works */ 716 req = container_of(_req, struct goku_request, req); 717 if (unlikely(!_req || !_req->complete 718 || !_req->buf || !list_empty(&req->queue))) 719 return -EINVAL; 720 ep = container_of(_ep, struct goku_ep, ep); 721 if (unlikely(!_ep || (!ep->ep.desc && ep->num != 0))) 722 return -EINVAL; 723 dev = ep->dev; 724 if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)) 725 return -ESHUTDOWN; 726 727 /* can't touch registers when suspended */ 728 if (dev->ep0state == EP0_SUSPEND) 729 return -EBUSY; 730 731 /* set up dma mapping in case the caller didn't */ 732 if (ep->dma) { 733 status = usb_gadget_map_request(&dev->gadget, &req->req, 734 ep->is_in); 735 if (status) 736 return status; 737 } 738 739 #ifdef USB_TRACE 740 VDBG(dev, "%s queue req %p, len %u buf %p\n", 741 _ep->name, _req, _req->length, _req->buf); 742 #endif 743 744 spin_lock_irqsave(&dev->lock, flags); 745 746 _req->status = -EINPROGRESS; 747 _req->actual = 0; 748 749 /* for ep0 IN without premature status, zlp is required and 750 * writing EOP starts the status stage (OUT). 751 */ 752 if (unlikely(ep->num == 0 && ep->is_in)) 753 _req->zero = 1; 754 755 /* kickstart this i/o queue? */ 756 status = 0; 757 if (list_empty(&ep->queue) && likely(!ep->stopped)) { 758 /* dma: done after dma completion IRQ (or error) 759 * pio: done after last fifo operation 760 */ 761 if (ep->dma) 762 status = start_dma(ep, req); 763 else 764 status = (ep->is_in ? write_fifo : read_fifo)(ep, req); 765 766 if (unlikely(status != 0)) { 767 if (status > 0) 768 status = 0; 769 req = NULL; 770 } 771 772 } /* else pio or dma irq handler advances the queue. */ 773 774 if (likely(req != NULL)) 775 list_add_tail(&req->queue, &ep->queue); 776 777 if (likely(!list_empty(&ep->queue)) 778 && likely(ep->num != 0) 779 && !ep->dma 780 && !(dev->int_enable & INT_EPxDATASET (ep->num))) 781 pio_irq_enable(dev, dev->regs, ep->num); 782 783 spin_unlock_irqrestore(&dev->lock, flags); 784 785 /* pci writes may still be posted */ 786 return status; 787 } 788 789 /* dequeue ALL requests */ 790 static void nuke(struct goku_ep *ep, int status) 791 { 792 struct goku_request *req; 793 794 ep->stopped = 1; 795 if (list_empty(&ep->queue)) 796 return; 797 if (ep->dma) 798 abort_dma(ep, status); 799 while (!list_empty(&ep->queue)) { 800 req = list_entry(ep->queue.next, struct goku_request, queue); 801 done(ep, req, status); 802 } 803 } 804 805 /* dequeue JUST ONE request */ 806 static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req) 807 { 808 struct goku_request *req; 809 struct goku_ep *ep; 810 struct goku_udc *dev; 811 unsigned long flags; 812 813 ep = container_of(_ep, struct goku_ep, ep); 814 if (!_ep || !_req || (!ep->ep.desc && ep->num != 0)) 815 return -EINVAL; 816 dev = ep->dev; 817 if (!dev->driver) 818 return -ESHUTDOWN; 819 820 /* we can't touch (dma) registers when suspended */ 821 if (dev->ep0state == EP0_SUSPEND) 822 return -EBUSY; 823 824 VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name, 825 ep->is_in ? "IN" : "OUT", 826 ep->dma ? "dma" : "pio", 827 _req); 828 829 spin_lock_irqsave(&dev->lock, flags); 830 831 /* make sure it's actually queued on this endpoint */ 832 list_for_each_entry (req, &ep->queue, queue) { 833 if (&req->req == _req) 834 break; 835 } 836 if (&req->req != _req) { 837 spin_unlock_irqrestore (&dev->lock, flags); 838 return -EINVAL; 839 } 840 841 if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) { 842 abort_dma(ep, -ECONNRESET); 843 done(ep, req, -ECONNRESET); 844 dma_advance(dev, ep); 845 } else if (!list_empty(&req->queue)) 846 done(ep, req, -ECONNRESET); 847 else 848 req = NULL; 849 spin_unlock_irqrestore(&dev->lock, flags); 850 851 return req ? 0 : -EOPNOTSUPP; 852 } 853 854 /*-------------------------------------------------------------------------*/ 855 856 static void goku_clear_halt(struct goku_ep *ep) 857 { 858 // assert (ep->num !=0) 859 VDBG(ep->dev, "%s clear halt\n", ep->ep.name); 860 command(ep->dev->regs, COMMAND_SETDATA0, ep->num); 861 command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num); 862 if (ep->stopped) { 863 ep->stopped = 0; 864 if (ep->dma) { 865 struct goku_request *req; 866 867 if (list_empty(&ep->queue)) 868 return; 869 req = list_entry(ep->queue.next, struct goku_request, 870 queue); 871 (void) start_dma(ep, req); 872 } else 873 pio_advance(ep); 874 } 875 } 876 877 static int goku_set_halt(struct usb_ep *_ep, int value) 878 { 879 struct goku_ep *ep; 880 unsigned long flags; 881 int retval = 0; 882 883 if (!_ep) 884 return -ENODEV; 885 ep = container_of (_ep, struct goku_ep, ep); 886 887 if (ep->num == 0) { 888 if (value) { 889 ep->dev->ep0state = EP0_STALL; 890 ep->dev->ep[0].stopped = 1; 891 } else 892 return -EINVAL; 893 894 /* don't change EPxSTATUS_EP_INVALID to READY */ 895 } else if (!ep->ep.desc) { 896 DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name); 897 return -EINVAL; 898 } 899 900 spin_lock_irqsave(&ep->dev->lock, flags); 901 if (!list_empty(&ep->queue)) 902 retval = -EAGAIN; 903 else if (ep->is_in && value 904 /* data in (either) packet buffer? */ 905 && (readl(&ep->dev->regs->DataSet) 906 & DATASET_AB(ep->num))) 907 retval = -EAGAIN; 908 else if (!value) 909 goku_clear_halt(ep); 910 else { 911 ep->stopped = 1; 912 VDBG(ep->dev, "%s set halt\n", ep->ep.name); 913 command(ep->dev->regs, COMMAND_STALL, ep->num); 914 readl(ep->reg_status); 915 } 916 spin_unlock_irqrestore(&ep->dev->lock, flags); 917 return retval; 918 } 919 920 static int goku_fifo_status(struct usb_ep *_ep) 921 { 922 struct goku_ep *ep; 923 struct goku_udc_regs __iomem *regs; 924 u32 size; 925 926 if (!_ep) 927 return -ENODEV; 928 ep = container_of(_ep, struct goku_ep, ep); 929 930 /* size is only reported sanely for OUT */ 931 if (ep->is_in) 932 return -EOPNOTSUPP; 933 934 /* ignores 16-byte dma buffer; SizeH == 0 */ 935 regs = ep->dev->regs; 936 size = readl(®s->EPxSizeLA[ep->num]) & DATASIZE; 937 size += readl(®s->EPxSizeLB[ep->num]) & DATASIZE; 938 VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size); 939 return size; 940 } 941 942 static void goku_fifo_flush(struct usb_ep *_ep) 943 { 944 struct goku_ep *ep; 945 struct goku_udc_regs __iomem *regs; 946 u32 size; 947 948 if (!_ep) 949 return; 950 ep = container_of(_ep, struct goku_ep, ep); 951 VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name); 952 953 /* don't change EPxSTATUS_EP_INVALID to READY */ 954 if (!ep->ep.desc && ep->num != 0) { 955 DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name); 956 return; 957 } 958 959 regs = ep->dev->regs; 960 size = readl(®s->EPxSizeLA[ep->num]); 961 size &= DATASIZE; 962 963 /* Non-desirable behavior: FIFO_CLEAR also clears the 964 * endpoint halt feature. For OUT, we _could_ just read 965 * the bytes out (PIO, if !ep->dma); for in, no choice. 966 */ 967 if (size) 968 command(regs, COMMAND_FIFO_CLEAR, ep->num); 969 } 970 971 static struct usb_ep_ops goku_ep_ops = { 972 .enable = goku_ep_enable, 973 .disable = goku_ep_disable, 974 975 .alloc_request = goku_alloc_request, 976 .free_request = goku_free_request, 977 978 .queue = goku_queue, 979 .dequeue = goku_dequeue, 980 981 .set_halt = goku_set_halt, 982 .fifo_status = goku_fifo_status, 983 .fifo_flush = goku_fifo_flush, 984 }; 985 986 /*-------------------------------------------------------------------------*/ 987 988 static int goku_get_frame(struct usb_gadget *_gadget) 989 { 990 return -EOPNOTSUPP; 991 } 992 993 static struct usb_ep *goku_match_ep(struct usb_gadget *g, 994 struct usb_endpoint_descriptor *desc, 995 struct usb_ss_ep_comp_descriptor *ep_comp) 996 { 997 struct goku_udc *dev = to_goku_udc(g); 998 struct usb_ep *ep; 999 1000 switch (usb_endpoint_type(desc)) { 1001 case USB_ENDPOINT_XFER_INT: 1002 /* single buffering is enough */ 1003 ep = &dev->ep[3].ep; 1004 if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp)) 1005 return ep; 1006 break; 1007 case USB_ENDPOINT_XFER_BULK: 1008 if (usb_endpoint_dir_in(desc)) { 1009 /* DMA may be available */ 1010 ep = &dev->ep[2].ep; 1011 if (usb_gadget_ep_match_desc(g, ep, desc, ep_comp)) 1012 return ep; 1013 } 1014 break; 1015 default: 1016 /* nothing */ ; 1017 } 1018 1019 return NULL; 1020 } 1021 1022 static int goku_udc_start(struct usb_gadget *g, 1023 struct usb_gadget_driver *driver); 1024 static int goku_udc_stop(struct usb_gadget *g); 1025 1026 static const struct usb_gadget_ops goku_ops = { 1027 .get_frame = goku_get_frame, 1028 .udc_start = goku_udc_start, 1029 .udc_stop = goku_udc_stop, 1030 .match_ep = goku_match_ep, 1031 // no remote wakeup 1032 // not selfpowered 1033 }; 1034 1035 /*-------------------------------------------------------------------------*/ 1036 1037 static inline const char *dmastr(void) 1038 { 1039 if (use_dma == 0) 1040 return "(dma disabled)"; 1041 else if (use_dma == 2) 1042 return "(dma IN and OUT)"; 1043 else 1044 return "(dma IN)"; 1045 } 1046 1047 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1048 1049 static const char proc_node_name [] = "driver/udc"; 1050 1051 #define FOURBITS "%s%s%s%s" 1052 #define EIGHTBITS FOURBITS FOURBITS 1053 1054 static void dump_intmask(struct seq_file *m, const char *label, u32 mask) 1055 { 1056 /* int_status is the same format ... */ 1057 seq_printf(m, "%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n", 1058 label, mask, 1059 (mask & INT_PWRDETECT) ? " power" : "", 1060 (mask & INT_SYSERROR) ? " sys" : "", 1061 (mask & INT_MSTRDEND) ? " in-dma" : "", 1062 (mask & INT_MSTWRTMOUT) ? " wrtmo" : "", 1063 1064 (mask & INT_MSTWREND) ? " out-dma" : "", 1065 (mask & INT_MSTWRSET) ? " wrset" : "", 1066 (mask & INT_ERR) ? " err" : "", 1067 (mask & INT_SOF) ? " sof" : "", 1068 1069 (mask & INT_EP3NAK) ? " ep3nak" : "", 1070 (mask & INT_EP2NAK) ? " ep2nak" : "", 1071 (mask & INT_EP1NAK) ? " ep1nak" : "", 1072 (mask & INT_EP3DATASET) ? " ep3" : "", 1073 1074 (mask & INT_EP2DATASET) ? " ep2" : "", 1075 (mask & INT_EP1DATASET) ? " ep1" : "", 1076 (mask & INT_STATUSNAK) ? " ep0snak" : "", 1077 (mask & INT_STATUS) ? " ep0status" : "", 1078 1079 (mask & INT_SETUP) ? " setup" : "", 1080 (mask & INT_ENDPOINT0) ? " ep0" : "", 1081 (mask & INT_USBRESET) ? " reset" : "", 1082 (mask & INT_SUSPEND) ? " suspend" : ""); 1083 } 1084 1085 static const char *udc_ep_state(enum ep0state state) 1086 { 1087 switch (state) { 1088 case EP0_DISCONNECT: 1089 return "ep0_disconnect"; 1090 case EP0_IDLE: 1091 return "ep0_idle"; 1092 case EP0_IN: 1093 return "ep0_in"; 1094 case EP0_OUT: 1095 return "ep0_out"; 1096 case EP0_STATUS: 1097 return "ep0_status"; 1098 case EP0_STALL: 1099 return "ep0_stall"; 1100 case EP0_SUSPEND: 1101 return "ep0_suspend"; 1102 } 1103 1104 return "ep0_?"; 1105 } 1106 1107 static const char *udc_ep_status(u32 status) 1108 { 1109 switch (status & EPxSTATUS_EP_MASK) { 1110 case EPxSTATUS_EP_READY: 1111 return "ready"; 1112 case EPxSTATUS_EP_DATAIN: 1113 return "packet"; 1114 case EPxSTATUS_EP_FULL: 1115 return "full"; 1116 case EPxSTATUS_EP_TX_ERR: /* host will retry */ 1117 return "tx_err"; 1118 case EPxSTATUS_EP_RX_ERR: 1119 return "rx_err"; 1120 case EPxSTATUS_EP_BUSY: /* ep0 only */ 1121 return "busy"; 1122 case EPxSTATUS_EP_STALL: 1123 return "stall"; 1124 case EPxSTATUS_EP_INVALID: /* these "can't happen" */ 1125 return "invalid"; 1126 } 1127 1128 return "?"; 1129 } 1130 1131 static int udc_proc_read(struct seq_file *m, void *v) 1132 { 1133 struct goku_udc *dev = m->private; 1134 struct goku_udc_regs __iomem *regs = dev->regs; 1135 unsigned long flags; 1136 int i, is_usb_connected; 1137 u32 tmp; 1138 1139 local_irq_save(flags); 1140 1141 /* basic device status */ 1142 tmp = readl(®s->power_detect); 1143 is_usb_connected = tmp & PW_DETECT; 1144 seq_printf(m, 1145 "%s - %s\n" 1146 "%s version: %s %s\n" 1147 "Gadget driver: %s\n" 1148 "Host %s, %s\n" 1149 "\n", 1150 pci_name(dev->pdev), driver_desc, 1151 driver_name, DRIVER_VERSION, dmastr(), 1152 dev->driver ? dev->driver->driver.name : "(none)", 1153 is_usb_connected 1154 ? ((tmp & PW_PULLUP) ? "full speed" : "powered") 1155 : "disconnected", 1156 udc_ep_state(dev->ep0state)); 1157 1158 dump_intmask(m, "int_status", readl(®s->int_status)); 1159 dump_intmask(m, "int_enable", readl(®s->int_enable)); 1160 1161 if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0) 1162 goto done; 1163 1164 /* registers for (active) device and ep0 */ 1165 seq_printf(m, "\nirqs %lu\ndataset %02x single.bcs %02x.%02x state %x addr %u\n", 1166 dev->irqs, readl(®s->DataSet), 1167 readl(®s->EPxSingle), readl(®s->EPxBCS), 1168 readl(®s->UsbState), 1169 readl(®s->address)); 1170 if (seq_has_overflowed(m)) 1171 goto done; 1172 1173 tmp = readl(®s->dma_master); 1174 seq_printf(m, "dma %03X =" EIGHTBITS "%s %s\n", 1175 tmp, 1176 (tmp & MST_EOPB_DIS) ? " eopb-" : "", 1177 (tmp & MST_EOPB_ENA) ? " eopb+" : "", 1178 (tmp & MST_TIMEOUT_DIS) ? " tmo-" : "", 1179 (tmp & MST_TIMEOUT_ENA) ? " tmo+" : "", 1180 1181 (tmp & MST_RD_EOPB) ? " eopb" : "", 1182 (tmp & MST_RD_RESET) ? " in_reset" : "", 1183 (tmp & MST_WR_RESET) ? " out_reset" : "", 1184 (tmp & MST_RD_ENA) ? " IN" : "", 1185 1186 (tmp & MST_WR_ENA) ? " OUT" : "", 1187 (tmp & MST_CONNECTION) ? "ep1in/ep2out" : "ep1out/ep2in"); 1188 if (seq_has_overflowed(m)) 1189 goto done; 1190 1191 /* dump endpoint queues */ 1192 for (i = 0; i < 4; i++) { 1193 struct goku_ep *ep = &dev->ep [i]; 1194 struct goku_request *req; 1195 1196 if (i && !ep->ep.desc) 1197 continue; 1198 1199 tmp = readl(ep->reg_status); 1200 seq_printf(m, "%s %s max %u %s, irqs %lu, status %02x (%s) " FOURBITS "\n", 1201 ep->ep.name, 1202 ep->is_in ? "in" : "out", 1203 ep->ep.maxpacket, 1204 ep->dma ? "dma" : "pio", 1205 ep->irqs, 1206 tmp, udc_ep_status(tmp), 1207 (tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0", 1208 (tmp & EPxSTATUS_SUSPEND) ? " suspend" : "", 1209 (tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "", 1210 (tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""); 1211 if (seq_has_overflowed(m)) 1212 goto done; 1213 1214 if (list_empty(&ep->queue)) { 1215 seq_puts(m, "\t(nothing queued)\n"); 1216 if (seq_has_overflowed(m)) 1217 goto done; 1218 continue; 1219 } 1220 list_for_each_entry(req, &ep->queue, queue) { 1221 if (ep->dma && req->queue.prev == &ep->queue) { 1222 if (i == UDC_MSTRD_ENDPOINT) 1223 tmp = readl(®s->in_dma_current); 1224 else 1225 tmp = readl(®s->out_dma_current); 1226 tmp -= req->req.dma; 1227 tmp++; 1228 } else 1229 tmp = req->req.actual; 1230 1231 seq_printf(m, "\treq %p len %u/%u buf %p\n", 1232 &req->req, tmp, req->req.length, 1233 req->req.buf); 1234 if (seq_has_overflowed(m)) 1235 goto done; 1236 } 1237 } 1238 1239 done: 1240 local_irq_restore(flags); 1241 return 0; 1242 } 1243 1244 /* 1245 * seq_file wrappers for procfile show routines. 1246 */ 1247 static int udc_proc_open(struct inode *inode, struct file *file) 1248 { 1249 return single_open(file, udc_proc_read, PDE_DATA(file_inode(file))); 1250 } 1251 1252 static const struct file_operations udc_proc_fops = { 1253 .open = udc_proc_open, 1254 .read = seq_read, 1255 .llseek = seq_lseek, 1256 .release = single_release, 1257 }; 1258 1259 #endif /* CONFIG_USB_GADGET_DEBUG_FILES */ 1260 1261 /*-------------------------------------------------------------------------*/ 1262 1263 static void udc_reinit (struct goku_udc *dev) 1264 { 1265 static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" }; 1266 1267 unsigned i; 1268 1269 INIT_LIST_HEAD (&dev->gadget.ep_list); 1270 dev->gadget.ep0 = &dev->ep [0].ep; 1271 dev->gadget.speed = USB_SPEED_UNKNOWN; 1272 dev->ep0state = EP0_DISCONNECT; 1273 dev->irqs = 0; 1274 1275 for (i = 0; i < 4; i++) { 1276 struct goku_ep *ep = &dev->ep[i]; 1277 1278 ep->num = i; 1279 ep->ep.name = names[i]; 1280 ep->reg_fifo = &dev->regs->ep_fifo [i]; 1281 ep->reg_status = &dev->regs->ep_status [i]; 1282 ep->reg_mode = &dev->regs->ep_mode[i]; 1283 1284 ep->ep.ops = &goku_ep_ops; 1285 list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list); 1286 ep->dev = dev; 1287 INIT_LIST_HEAD (&ep->queue); 1288 1289 ep_reset(NULL, ep); 1290 1291 if (i == 0) 1292 ep->ep.caps.type_control = true; 1293 else 1294 ep->ep.caps.type_bulk = true; 1295 1296 ep->ep.caps.dir_in = true; 1297 ep->ep.caps.dir_out = true; 1298 } 1299 1300 dev->ep[0].reg_mode = NULL; 1301 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, MAX_EP0_SIZE); 1302 list_del_init (&dev->ep[0].ep.ep_list); 1303 } 1304 1305 static void udc_reset(struct goku_udc *dev) 1306 { 1307 struct goku_udc_regs __iomem *regs = dev->regs; 1308 1309 writel(0, ®s->power_detect); 1310 writel(0, ®s->int_enable); 1311 readl(®s->int_enable); 1312 dev->int_enable = 0; 1313 1314 /* deassert reset, leave USB D+ at hi-Z (no pullup) 1315 * don't let INT_PWRDETECT sequence begin 1316 */ 1317 udelay(250); 1318 writel(PW_RESETB, ®s->power_detect); 1319 readl(®s->int_enable); 1320 } 1321 1322 static void ep0_start(struct goku_udc *dev) 1323 { 1324 struct goku_udc_regs __iomem *regs = dev->regs; 1325 unsigned i; 1326 1327 VDBG(dev, "%s\n", __func__); 1328 1329 udc_reset(dev); 1330 udc_reinit (dev); 1331 //writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, ®s->dma_master); 1332 1333 /* hw handles set_address, set_feature, get_status; maybe more */ 1334 writel( G_REQMODE_SET_INTF | G_REQMODE_GET_INTF 1335 | G_REQMODE_SET_CONF | G_REQMODE_GET_CONF 1336 | G_REQMODE_GET_DESC 1337 | G_REQMODE_CLEAR_FEAT 1338 , ®s->reqmode); 1339 1340 for (i = 0; i < 4; i++) 1341 dev->ep[i].irqs = 0; 1342 1343 /* can't modify descriptors after writing UsbReady */ 1344 for (i = 0; i < DESC_LEN; i++) 1345 writel(0, ®s->descriptors[i]); 1346 writel(0, ®s->UsbReady); 1347 1348 /* expect ep0 requests when the host drops reset */ 1349 writel(PW_RESETB | PW_PULLUP, ®s->power_detect); 1350 dev->int_enable = INT_DEVWIDE | INT_EP0; 1351 writel(dev->int_enable, &dev->regs->int_enable); 1352 readl(®s->int_enable); 1353 dev->gadget.speed = USB_SPEED_FULL; 1354 dev->ep0state = EP0_IDLE; 1355 } 1356 1357 static void udc_enable(struct goku_udc *dev) 1358 { 1359 /* start enumeration now, or after power detect irq */ 1360 if (readl(&dev->regs->power_detect) & PW_DETECT) 1361 ep0_start(dev); 1362 else { 1363 DBG(dev, "%s\n", __func__); 1364 dev->int_enable = INT_PWRDETECT; 1365 writel(dev->int_enable, &dev->regs->int_enable); 1366 } 1367 } 1368 1369 /*-------------------------------------------------------------------------*/ 1370 1371 /* keeping it simple: 1372 * - one bus driver, initted first; 1373 * - one function driver, initted second 1374 */ 1375 1376 /* when a driver is successfully registered, it will receive 1377 * control requests including set_configuration(), which enables 1378 * non-control requests. then usb traffic follows until a 1379 * disconnect is reported. then a host may connect again, or 1380 * the driver might get unbound. 1381 */ 1382 static int goku_udc_start(struct usb_gadget *g, 1383 struct usb_gadget_driver *driver) 1384 { 1385 struct goku_udc *dev = to_goku_udc(g); 1386 1387 /* hook up the driver */ 1388 driver->driver.bus = NULL; 1389 dev->driver = driver; 1390 1391 /* 1392 * then enable host detection and ep0; and we're ready 1393 * for set_configuration as well as eventual disconnect. 1394 */ 1395 udc_enable(dev); 1396 1397 return 0; 1398 } 1399 1400 static void stop_activity(struct goku_udc *dev) 1401 { 1402 unsigned i; 1403 1404 DBG (dev, "%s\n", __func__); 1405 1406 /* disconnect gadget driver after quiesceing hw and the driver */ 1407 udc_reset (dev); 1408 for (i = 0; i < 4; i++) 1409 nuke(&dev->ep [i], -ESHUTDOWN); 1410 1411 if (dev->driver) 1412 udc_enable(dev); 1413 } 1414 1415 static int goku_udc_stop(struct usb_gadget *g) 1416 { 1417 struct goku_udc *dev = to_goku_udc(g); 1418 unsigned long flags; 1419 1420 spin_lock_irqsave(&dev->lock, flags); 1421 dev->driver = NULL; 1422 stop_activity(dev); 1423 spin_unlock_irqrestore(&dev->lock, flags); 1424 1425 return 0; 1426 } 1427 1428 /*-------------------------------------------------------------------------*/ 1429 1430 static void ep0_setup(struct goku_udc *dev) 1431 { 1432 struct goku_udc_regs __iomem *regs = dev->regs; 1433 struct usb_ctrlrequest ctrl; 1434 int tmp; 1435 1436 /* read SETUP packet and enter DATA stage */ 1437 ctrl.bRequestType = readl(®s->bRequestType); 1438 ctrl.bRequest = readl(®s->bRequest); 1439 ctrl.wValue = cpu_to_le16((readl(®s->wValueH) << 8) 1440 | readl(®s->wValueL)); 1441 ctrl.wIndex = cpu_to_le16((readl(®s->wIndexH) << 8) 1442 | readl(®s->wIndexL)); 1443 ctrl.wLength = cpu_to_le16((readl(®s->wLengthH) << 8) 1444 | readl(®s->wLengthL)); 1445 writel(0, ®s->SetupRecv); 1446 1447 nuke(&dev->ep[0], 0); 1448 dev->ep[0].stopped = 0; 1449 if (likely(ctrl.bRequestType & USB_DIR_IN)) { 1450 dev->ep[0].is_in = 1; 1451 dev->ep0state = EP0_IN; 1452 /* detect early status stages */ 1453 writel(ICONTROL_STATUSNAK, &dev->regs->IntControl); 1454 } else { 1455 dev->ep[0].is_in = 0; 1456 dev->ep0state = EP0_OUT; 1457 1458 /* NOTE: CLEAR_FEATURE is done in software so that we can 1459 * synchronize transfer restarts after bulk IN stalls. data 1460 * won't even enter the fifo until the halt is cleared. 1461 */ 1462 switch (ctrl.bRequest) { 1463 case USB_REQ_CLEAR_FEATURE: 1464 switch (ctrl.bRequestType) { 1465 case USB_RECIP_ENDPOINT: 1466 tmp = le16_to_cpu(ctrl.wIndex) & 0x0f; 1467 /* active endpoint */ 1468 if (tmp > 3 || 1469 (!dev->ep[tmp].ep.desc && tmp != 0)) 1470 goto stall; 1471 if (ctrl.wIndex & cpu_to_le16( 1472 USB_DIR_IN)) { 1473 if (!dev->ep[tmp].is_in) 1474 goto stall; 1475 } else { 1476 if (dev->ep[tmp].is_in) 1477 goto stall; 1478 } 1479 if (ctrl.wValue != cpu_to_le16( 1480 USB_ENDPOINT_HALT)) 1481 goto stall; 1482 if (tmp) 1483 goku_clear_halt(&dev->ep[tmp]); 1484 succeed: 1485 /* start ep0out status stage */ 1486 writel(~(1<<0), ®s->EOP); 1487 dev->ep[0].stopped = 1; 1488 dev->ep0state = EP0_STATUS; 1489 return; 1490 case USB_RECIP_DEVICE: 1491 /* device remote wakeup: always clear */ 1492 if (ctrl.wValue != cpu_to_le16(1)) 1493 goto stall; 1494 VDBG(dev, "clear dev remote wakeup\n"); 1495 goto succeed; 1496 case USB_RECIP_INTERFACE: 1497 goto stall; 1498 default: /* pass to gadget driver */ 1499 break; 1500 } 1501 break; 1502 default: 1503 break; 1504 } 1505 } 1506 1507 #ifdef USB_TRACE 1508 VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n", 1509 ctrl.bRequestType, ctrl.bRequest, 1510 le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex), 1511 le16_to_cpu(ctrl.wLength)); 1512 #endif 1513 1514 /* hw wants to know when we're configured (or not) */ 1515 dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION 1516 && ctrl.bRequestType == USB_RECIP_DEVICE); 1517 if (unlikely(dev->req_config)) 1518 dev->configured = (ctrl.wValue != cpu_to_le16(0)); 1519 1520 /* delegate everything to the gadget driver. 1521 * it may respond after this irq handler returns. 1522 */ 1523 spin_unlock (&dev->lock); 1524 tmp = dev->driver->setup(&dev->gadget, &ctrl); 1525 spin_lock (&dev->lock); 1526 if (unlikely(tmp < 0)) { 1527 stall: 1528 #ifdef USB_TRACE 1529 VDBG(dev, "req %02x.%02x protocol STALL; err %d\n", 1530 ctrl.bRequestType, ctrl.bRequest, tmp); 1531 #endif 1532 command(regs, COMMAND_STALL, 0); 1533 dev->ep[0].stopped = 1; 1534 dev->ep0state = EP0_STALL; 1535 } 1536 1537 /* expect at least one data or status stage irq */ 1538 } 1539 1540 #define ACK(irqbit) { \ 1541 stat &= ~irqbit; \ 1542 writel(~irqbit, ®s->int_status); \ 1543 handled = 1; \ 1544 } 1545 1546 static irqreturn_t goku_irq(int irq, void *_dev) 1547 { 1548 struct goku_udc *dev = _dev; 1549 struct goku_udc_regs __iomem *regs = dev->regs; 1550 struct goku_ep *ep; 1551 u32 stat, handled = 0; 1552 unsigned i, rescans = 5; 1553 1554 spin_lock(&dev->lock); 1555 1556 rescan: 1557 stat = readl(®s->int_status) & dev->int_enable; 1558 if (!stat) 1559 goto done; 1560 dev->irqs++; 1561 1562 /* device-wide irqs */ 1563 if (unlikely(stat & INT_DEVWIDE)) { 1564 if (stat & INT_SYSERROR) { 1565 ERROR(dev, "system error\n"); 1566 stop_activity(dev); 1567 stat = 0; 1568 handled = 1; 1569 // FIXME have a neater way to prevent re-enumeration 1570 dev->driver = NULL; 1571 goto done; 1572 } 1573 if (stat & INT_PWRDETECT) { 1574 writel(~stat, ®s->int_status); 1575 if (readl(&dev->regs->power_detect) & PW_DETECT) { 1576 VDBG(dev, "connect\n"); 1577 ep0_start(dev); 1578 } else { 1579 DBG(dev, "disconnect\n"); 1580 if (dev->gadget.speed == USB_SPEED_FULL) 1581 stop_activity(dev); 1582 dev->ep0state = EP0_DISCONNECT; 1583 dev->int_enable = INT_DEVWIDE; 1584 writel(dev->int_enable, &dev->regs->int_enable); 1585 } 1586 stat = 0; 1587 handled = 1; 1588 goto done; 1589 } 1590 if (stat & INT_SUSPEND) { 1591 ACK(INT_SUSPEND); 1592 if (readl(®s->ep_status[0]) & EPxSTATUS_SUSPEND) { 1593 switch (dev->ep0state) { 1594 case EP0_DISCONNECT: 1595 case EP0_SUSPEND: 1596 goto pm_next; 1597 default: 1598 break; 1599 } 1600 DBG(dev, "USB suspend\n"); 1601 dev->ep0state = EP0_SUSPEND; 1602 if (dev->gadget.speed != USB_SPEED_UNKNOWN 1603 && dev->driver 1604 && dev->driver->suspend) { 1605 spin_unlock(&dev->lock); 1606 dev->driver->suspend(&dev->gadget); 1607 spin_lock(&dev->lock); 1608 } 1609 } else { 1610 if (dev->ep0state != EP0_SUSPEND) { 1611 DBG(dev, "bogus USB resume %d\n", 1612 dev->ep0state); 1613 goto pm_next; 1614 } 1615 DBG(dev, "USB resume\n"); 1616 dev->ep0state = EP0_IDLE; 1617 if (dev->gadget.speed != USB_SPEED_UNKNOWN 1618 && dev->driver 1619 && dev->driver->resume) { 1620 spin_unlock(&dev->lock); 1621 dev->driver->resume(&dev->gadget); 1622 spin_lock(&dev->lock); 1623 } 1624 } 1625 } 1626 pm_next: 1627 if (stat & INT_USBRESET) { /* hub reset done */ 1628 ACK(INT_USBRESET); 1629 INFO(dev, "USB reset done, gadget %s\n", 1630 dev->driver->driver.name); 1631 } 1632 // and INT_ERR on some endpoint's crc/bitstuff/... problem 1633 } 1634 1635 /* progress ep0 setup, data, or status stages. 1636 * no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs 1637 */ 1638 if (stat & INT_SETUP) { 1639 ACK(INT_SETUP); 1640 dev->ep[0].irqs++; 1641 ep0_setup(dev); 1642 } 1643 if (stat & INT_STATUSNAK) { 1644 ACK(INT_STATUSNAK|INT_ENDPOINT0); 1645 if (dev->ep0state == EP0_IN) { 1646 ep = &dev->ep[0]; 1647 ep->irqs++; 1648 nuke(ep, 0); 1649 writel(~(1<<0), ®s->EOP); 1650 dev->ep0state = EP0_STATUS; 1651 } 1652 } 1653 if (stat & INT_ENDPOINT0) { 1654 ACK(INT_ENDPOINT0); 1655 ep = &dev->ep[0]; 1656 ep->irqs++; 1657 pio_advance(ep); 1658 } 1659 1660 /* dma completion */ 1661 if (stat & INT_MSTRDEND) { /* IN */ 1662 ACK(INT_MSTRDEND); 1663 ep = &dev->ep[UDC_MSTRD_ENDPOINT]; 1664 ep->irqs++; 1665 dma_advance(dev, ep); 1666 } 1667 if (stat & INT_MSTWREND) { /* OUT */ 1668 ACK(INT_MSTWREND); 1669 ep = &dev->ep[UDC_MSTWR_ENDPOINT]; 1670 ep->irqs++; 1671 dma_advance(dev, ep); 1672 } 1673 if (stat & INT_MSTWRTMOUT) { /* OUT */ 1674 ACK(INT_MSTWRTMOUT); 1675 ep = &dev->ep[UDC_MSTWR_ENDPOINT]; 1676 ep->irqs++; 1677 ERROR(dev, "%s write timeout ?\n", ep->ep.name); 1678 // reset dma? then dma_advance() 1679 } 1680 1681 /* pio */ 1682 for (i = 1; i < 4; i++) { 1683 u32 tmp = INT_EPxDATASET(i); 1684 1685 if (!(stat & tmp)) 1686 continue; 1687 ep = &dev->ep[i]; 1688 pio_advance(ep); 1689 if (list_empty (&ep->queue)) 1690 pio_irq_disable(dev, regs, i); 1691 stat &= ~tmp; 1692 handled = 1; 1693 ep->irqs++; 1694 } 1695 1696 if (rescans--) 1697 goto rescan; 1698 1699 done: 1700 (void)readl(®s->int_enable); 1701 spin_unlock(&dev->lock); 1702 if (stat) 1703 DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat, 1704 readl(®s->int_status), dev->int_enable); 1705 return IRQ_RETVAL(handled); 1706 } 1707 1708 #undef ACK 1709 1710 /*-------------------------------------------------------------------------*/ 1711 1712 static void gadget_release(struct device *_dev) 1713 { 1714 struct goku_udc *dev = dev_get_drvdata(_dev); 1715 1716 kfree(dev); 1717 } 1718 1719 /* tear down the binding between this driver and the pci device */ 1720 1721 static void goku_remove(struct pci_dev *pdev) 1722 { 1723 struct goku_udc *dev = pci_get_drvdata(pdev); 1724 1725 DBG(dev, "%s\n", __func__); 1726 1727 usb_del_gadget_udc(&dev->gadget); 1728 1729 BUG_ON(dev->driver); 1730 1731 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1732 remove_proc_entry(proc_node_name, NULL); 1733 #endif 1734 if (dev->regs) 1735 udc_reset(dev); 1736 if (dev->got_irq) 1737 free_irq(pdev->irq, dev); 1738 if (dev->regs) 1739 iounmap(dev->regs); 1740 if (dev->got_region) 1741 release_mem_region(pci_resource_start (pdev, 0), 1742 pci_resource_len (pdev, 0)); 1743 if (dev->enabled) 1744 pci_disable_device(pdev); 1745 1746 dev->regs = NULL; 1747 1748 INFO(dev, "unbind\n"); 1749 } 1750 1751 /* wrap this driver around the specified pci device, but 1752 * don't respond over USB until a gadget driver binds to us. 1753 */ 1754 1755 static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1756 { 1757 struct goku_udc *dev = NULL; 1758 unsigned long resource, len; 1759 void __iomem *base = NULL; 1760 int retval; 1761 1762 if (!pdev->irq) { 1763 printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev)); 1764 retval = -ENODEV; 1765 goto err; 1766 } 1767 1768 /* alloc, and start init */ 1769 dev = kzalloc (sizeof *dev, GFP_KERNEL); 1770 if (dev == NULL){ 1771 pr_debug("enomem %s\n", pci_name(pdev)); 1772 retval = -ENOMEM; 1773 goto err; 1774 } 1775 1776 spin_lock_init(&dev->lock); 1777 dev->pdev = pdev; 1778 dev->gadget.ops = &goku_ops; 1779 dev->gadget.max_speed = USB_SPEED_FULL; 1780 1781 /* the "gadget" abstracts/virtualizes the controller */ 1782 dev->gadget.name = driver_name; 1783 1784 /* now all the pci goodies ... */ 1785 retval = pci_enable_device(pdev); 1786 if (retval < 0) { 1787 DBG(dev, "can't enable, %d\n", retval); 1788 goto err; 1789 } 1790 dev->enabled = 1; 1791 1792 resource = pci_resource_start(pdev, 0); 1793 len = pci_resource_len(pdev, 0); 1794 if (!request_mem_region(resource, len, driver_name)) { 1795 DBG(dev, "controller already in use\n"); 1796 retval = -EBUSY; 1797 goto err; 1798 } 1799 dev->got_region = 1; 1800 1801 base = ioremap_nocache(resource, len); 1802 if (base == NULL) { 1803 DBG(dev, "can't map memory\n"); 1804 retval = -EFAULT; 1805 goto err; 1806 } 1807 dev->regs = (struct goku_udc_regs __iomem *) base; 1808 1809 pci_set_drvdata(pdev, dev); 1810 INFO(dev, "%s\n", driver_desc); 1811 INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr()); 1812 INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base); 1813 1814 /* init to known state, then setup irqs */ 1815 udc_reset(dev); 1816 udc_reinit (dev); 1817 if (request_irq(pdev->irq, goku_irq, IRQF_SHARED, 1818 driver_name, dev) != 0) { 1819 DBG(dev, "request interrupt %d failed\n", pdev->irq); 1820 retval = -EBUSY; 1821 goto err; 1822 } 1823 dev->got_irq = 1; 1824 if (use_dma) 1825 pci_set_master(pdev); 1826 1827 1828 #ifdef CONFIG_USB_GADGET_DEBUG_FILES 1829 proc_create_data(proc_node_name, 0, NULL, &udc_proc_fops, dev); 1830 #endif 1831 1832 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, 1833 gadget_release); 1834 if (retval) 1835 goto err; 1836 1837 return 0; 1838 1839 err: 1840 if (dev) 1841 goku_remove (pdev); 1842 return retval; 1843 } 1844 1845 1846 /*-------------------------------------------------------------------------*/ 1847 1848 static const struct pci_device_id pci_ids[] = { { 1849 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe), 1850 .class_mask = ~0, 1851 .vendor = 0x102f, /* Toshiba */ 1852 .device = 0x0107, /* this UDC */ 1853 .subvendor = PCI_ANY_ID, 1854 .subdevice = PCI_ANY_ID, 1855 1856 }, { /* end: all zeroes */ } 1857 }; 1858 MODULE_DEVICE_TABLE (pci, pci_ids); 1859 1860 static struct pci_driver goku_pci_driver = { 1861 .name = (char *) driver_name, 1862 .id_table = pci_ids, 1863 1864 .probe = goku_probe, 1865 .remove = goku_remove, 1866 1867 /* FIXME add power management support */ 1868 }; 1869 1870 module_pci_driver(goku_pci_driver); 1871